Example #1
func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
	// lxc is special in that we cannot create the master outside of the container without
	// opening the slave, because we have nothing to provide to the cmd. We have to open both and
	// do the setup on the command ourselves, instead of passing the console path to lxc and
	// telling it to open that console. We save a couple of open files in the native driver
	// because we can do this.
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return nil, err
	}

	tty := &TtyConsole{
		MasterPty: ptyMaster,
		SlavePty:  ptySlave,
	}

	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
		tty.Close()
		return nil, err
	}

	command.Console = tty.SlavePty.Name()

	return tty, nil
}
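The comment above explains why this driver opens both ends of the pty itself. As a rough illustration of that master/slave wiring, here is a minimal sketch assuming the github.com/kr/pty package (whose pty.Open returns the master and slave ends as *os.File); runOnPty and its main are illustrative only, not the driver's actual AttachPipes:

package main

import (
	"io"
	"os"
	"os/exec"

	"github.com/kr/pty"
)

// runOnPty starts cmd with all three standard streams attached to the slave
// end of a freshly opened pty and relays data between the master end and our
// own stdio, roughly what the driver's pipe attachment arranges.
func runOnPty(cmd *exec.Cmd) error {
	ptyMaster, ptySlave, err := pty.Open()
	if err != nil {
		return err
	}
	defer ptyMaster.Close()

	cmd.Stdin, cmd.Stdout, cmd.Stderr = ptySlave, ptySlave, ptySlave
	if err := cmd.Start(); err != nil {
		ptySlave.Close()
		return err
	}
	// The parent no longer needs the slave end once the child holds it.
	ptySlave.Close()

	go io.Copy(ptyMaster, os.Stdin) // forward our input to the child
	// Reading the master until it is torn down drains everything the child wrote.
	io.Copy(os.Stdout, ptyMaster)
	return cmd.Wait()
}

func main() {
	if err := runOnPty(exec.Command("echo", "hello from a pty")); err != nil {
		os.Exit(1)
	}
}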
Example #2
func (d *driver) generateEnvConfig(c *execdriver.Command) error {
	data, err := json.Marshal(c.Env)
	if err != nil {
		return err
	}
	p := path.Join(d.root, "containers", c.ID, "config.env")
	c.Mounts = append(c.Mounts, execdriver.Mount{p, "/.dockerenv", false, true})

	return ioutil.WriteFile(p, data, 0600)
}
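The helper above serializes the command's environment to JSON and bind-mounts the file read-only at /.dockerenv inside the container. A hypothetical in-container reader of that file might look like the following sketch (assuming Env is a slice of KEY=VALUE strings, as the other examples suggest; this is not dockerinit's actual code):

package dockerinit

import (
	"encoding/json"
	"io/ioutil"
	"os"
	"strings"
)

// loadDockerEnv reads the bind-mounted /.dockerenv file and applies each
// KEY=VALUE entry to the current process environment.
func loadDockerEnv() error {
	data, err := ioutil.ReadFile("/.dockerenv")
	if err != nil {
		return err
	}
	var env []string
	if err := json.Unmarshal(data, &env); err != nil {
		return err
	}
	for _, kv := range env {
		if i := strings.Index(kv, "="); i > 0 {
			if err := os.Setenv(kv[:i], kv[i+1:]); err != nil {
				return err
			}
		}
	}
	return nil
}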
Example #3
// callback ensures that the container's state is properly updated after we
// received ack from the execution drivers
func (m *containerMonitor) callback(command *execdriver.Command) {
	if command.Tty {
		// The callback is called after the process Start()
		// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
		// which we close here.
		if c, ok := command.Stdout.(io.Closer); ok {
			c.Close()
		}
	}

	m.container.State.SetRunning(command.Pid())

	if m.startSignal != nil {
		// signal that the process has started
		close(m.startSignal)
		m.startSignal = nil
	}

	if err := m.container.ToDisk(); err != nil {
		log.Debugf("%s", err)
	}
}
Example #4
func (d *driver) generateEnvConfig(c *execdriver.Command) error {
	data, err := json.Marshal(c.ProcessConfig.Env)
	if err != nil {
		return err
	}
	p := path.Join(d.root, "containers", c.ID, "config.env")
	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      p,
		Destination: "/.dockerenv",
		Writable:    false,
		Private:     true,
	})

	return ioutil.WriteFile(p, data, 0600)
}
Example #5
func NewTtyConsole(command *execdriver.Command, pipes *execdriver.Pipes) (*TtyConsole, error) {
	ptyMaster, console, err := system.CreateMasterAndConsole()
	if err != nil {
		return nil, err
	}

	tty := &TtyConsole{
		MasterPty: ptyMaster,
	}

	if err := tty.AttachPipes(&command.Cmd, pipes); err != nil {
		tty.Close()
		return nil, err
	}

	command.Console = console

	return tty, nil
}
Example #6
File: run.go Project: remh/docker
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {

	var (
		term                           execdriver.Terminal
		err                            error
		inListen, outListen, errListen *npipe.PipeListener
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
	}

	for i := 0; i < len(c.LayerPaths); i++ {
		cu.Layers = append(cu.Layers, layer{
			ID:   hcsshim.NewGUID(c.LayerPaths[i]).ToString(),
			Path: c.LayerPaths[i],
		})
	}

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.

	if c.Network.Interface != nil {

		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}

			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}

			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol:     proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container

		if terminateMode {
			logrus.Debugf("Terminating container %s", c.ID)
			if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
			}
		}
	}()

	// We use a different pipe name between real and dummy mode in the HCS
	var serverPipeFormat, clientPipeFormat string
	if dummyMode {
		clientPipeFormat = `\\.\pipe\docker-run-%[1]s-%[2]s`
		serverPipeFormat = clientPipeFormat
	} else {
		clientPipeFormat = `\\.\pipe\docker-run-%[2]s`
		serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-run-%[2]s`
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// Connect stdin
	if pipes.Stdin != nil {
		stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdin")
		createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdin")

		// Listen on the named pipe
		inListen, err = npipe.Listen(stdInPipe)
		if err != nil {
			logrus.Errorf("stdin failed to listen on %s err=%s", stdInPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer inListen.Close()

		// Launch a goroutine to do the accept. We do this so that we can
		// cause an otherwise blocking goroutine to gracefully close when
		// the caller (us) closes the listener
		go stdinAccept(inListen, stdInPipe, pipes.Stdin)
	}

	// Connect stdout
	stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdout")
	createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdout")

	outListen, err = npipe.Listen(stdOutPipe)
	if err != nil {
		logrus.Errorf("stdout failed to listen on %s err=%s", stdOutPipe, err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer outListen.Close()
	go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout)

	// No stderr on TTY.
	if !c.ProcessConfig.Tty {
		// Connect stderr
		stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stderr")
		createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stderr")
		errListen, err = npipe.Listen(stdErrPipe)
		if err != nil {
			logrus.Errorf("stderr failed to listen on %s err=%s", stdErrPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer errListen.Close()
		go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr)
	}

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var pid uint32
	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)

	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	//Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
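The stdinAccept and stdouterrAccept helpers used above are not shown in this example. A minimal sketch of what such helpers could look like with the natefinch npipe package (an assumption about their shape and signatures, not the project's actual implementation):

package windowsexec

import (
	"io"

	"gopkg.in/natefinch/npipe.v2"
)

// stdinAccept waits for the in-container side to connect to the named pipe,
// then streams the caller's stdin into the connection.
func stdinAccept(l *npipe.PipeListener, pipeName string, stdin io.ReadCloser) {
	conn, err := l.Accept()
	if err != nil {
		return // the listener was closed by the deferred Close in Run()
	}
	defer conn.Close()
	io.Copy(conn, stdin)
	stdin.Close()
}

// stdouterrAccept does the reverse: it copies the container's stdout or
// stderr stream out to the caller's writer.
func stdouterrAccept(l *npipe.PipeListener, pipeName string, w io.Writer) {
	conn, err := l.Accept()
	if err != nil {
		return
	}
	defer conn.Close()
	io.Copy(w, conn)
}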
Example #7
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	var (
		term execdriver.Terminal
		err  error
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	c.ProcessConfig.Terminal = term

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return -1, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return -1, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
		"--",
		c.InitPath,
	}

	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")

		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	if len(c.CapAdd) > 0 {
		params = append(params, fmt.Sprintf("-cap-add=%s", strings.Join(c.CapAdd, ":")))
	}

	if len(c.CapDrop) > 0 {
		params = append(params, fmt.Sprintf("-cap-drop=%s", strings.Join(c.CapDrop, ":")))
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}

	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := nodes.CreateDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return -1, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return -1, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return -1, err
	}

	c.ContainerPid = pid

	if startCallback != nil {
		startCallback(&c.ProcessConfig, pid)
	}

	<-waitLock

	return getExitCode(c), waitErr
}
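getExitCode is referenced above but not shown. A plausible sketch of how an exit code can be recovered from an exited exec.Cmd on Unix (an assumption, not the driver's exact helper):

package lxcexec

import (
	"os/exec"
	"syscall"
)

// exitCodeOf returns the exit status of a command that has already been
// waited on, or -1 if no process state is available.
func exitCodeOf(cmd *exec.Cmd) int {
	if cmd.ProcessState == nil {
		return -1 // never started or not yet waited on
	}
	if ws, ok := cmd.ProcessState.Sys().(syscall.WaitStatus); ok {
		return ws.ExitStatus()
	}
	return -1
}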
Example #8
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	var (
		term     execdriver.Terminal
		err      error
		dataPath = d.containerDir(c.ID)
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	c.ProcessConfig.Terminal = term
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
	}

	// From lxc>=1.1 the default behavior is to daemonize containers after start
	lxcVersion := version.Version(d.version())
	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
		params = append(params, "-F")
	}

	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}

	params = append(params,
		"--",
		c.InitPath,
	)
	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")

		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				utils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	logrus.Debugf("lxc params %s", params)
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	terminate := func(terr error) (execdriver.ExitStatus, error) {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return execdriver.ExitStatus{ExitCode: -1}, terr
	}
	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		return terminate(err)
	}

	cgroupPaths, err := cgroupPaths(c.ID)
	if err != nil {
		return terminate(err)
	}

	state := &libcontainer.State{
		InitProcessPid: pid,
		CgroupPaths:    cgroupPaths,
	}

	f, err := os.Create(filepath.Join(dataPath, "state.json"))
	if err != nil {
		return terminate(err)
	}
	defer f.Close()

	if err := json.NewEncoder(f).Encode(state); err != nil {
		return terminate(err)
	}

	c.ContainerPid = pid

	if startCallback != nil {
		logrus.Debugf("Invoking startCallback")
		startCallback(&c.ProcessConfig, pid)
	}

	oomKill := false
	oomKillNotification, err := notifyOnOOM(cgroupPaths)

	<-waitLock

	if err == nil {
		_, oomKill = <-oomKillNotification
		logrus.Debugf("oomKill error %s waitErr %s", oomKill, waitErr)
	} else {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
	}

	// check oom error
	exitCode := getExitCode(c)
	if oomKill {
		exitCode = 137
	}
	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
Example #9
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return -1, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return -1, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return -1, err
	}
	defer d.removeContainerRoot(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return -1, err
	}

	return namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, c.Rootfs, dataPath, args, func(container *libcontainer.Config, console, rootfs, dataPath, init string, child *os.File, args []string) *exec.Cmd {
		c.ProcessConfig.Path = d.initPath
		c.ProcessConfig.Args = append([]string{
			DriverName,
			"-console", console,
			"-pipe", "3",
			"-root", filepath.Join(d.root, c.ID),
			"--",
		}, args...)

		// start from a fresh SysProcAttr so that setting the clone flags resets anything else
		c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
			Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
		}
		c.ProcessConfig.ExtraFiles = []*os.File{child}

		c.ProcessConfig.Env = container.Env
		c.ProcessConfig.Dir = c.Rootfs

		return &c.ProcessConfig.Cmd
	}, func() {
		if startCallback != nil {
			c.ContainerPid = c.ProcessConfig.Process.Pid
			startCallback(&c.ProcessConfig, c.ContainerPid)
		}
	})
}
Example #10
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	var (
		dataPath = filepath.Join(d.root, c.ID)
		args     = append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...)
	)

	if err := d.createContainerRoot(c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer d.cleanContainer(c.ID)

	if err := d.writeContainerFile(container, c.ID); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	execOutputChan := make(chan execOutput, 1)
	waitForStart := make(chan struct{})

	go func() {
		exitCode, err := namespaces.Exec(container, c.ProcessConfig.Stdin, c.ProcessConfig.Stdout, c.ProcessConfig.Stderr, c.ProcessConfig.Console, dataPath, args, func(container *libcontainer.Config, console, dataPath, init string, child *os.File, args []string) *exec.Cmd {
			c.ProcessConfig.Path = d.initPath
			c.ProcessConfig.Args = append([]string{
				DriverName,
				"-console", console,
				"-pipe", "3",
				"-root", filepath.Join(d.root, c.ID),
				"--",
			}, args...)

			// start from a fresh SysProcAttr so that setting the clone flags resets anything else
			c.ProcessConfig.SysProcAttr = &syscall.SysProcAttr{
				Cloneflags: uintptr(namespaces.GetNamespaceFlags(container.Namespaces)),
			}
			c.ProcessConfig.ExtraFiles = []*os.File{child}

			c.ProcessConfig.Env = container.Env
			c.ProcessConfig.Dir = container.RootFs

			return &c.ProcessConfig.Cmd
		}, func() {
			close(waitForStart)
			if startCallback != nil {
				c.ContainerPid = c.ProcessConfig.Process.Pid
				startCallback(&c.ProcessConfig, c.ContainerPid)
			}
		})
		execOutputChan <- execOutput{exitCode, err}
	}()

	select {
	case execOutput := <-execOutputChan:
		return execdriver.ExitStatus{ExitCode: execOutput.exitCode}, execOutput.err
	case <-waitForStart:
		break
	}

	oomKill := false
	state, err := libcontainer.GetState(filepath.Join(d.root, c.ID))
	if err == nil {
		oomKillNotification, err := libcontainer.NotifyOnOOM(state)
		if err == nil {
			_, oomKill = <-oomKillNotification
		} else {
			log.Warnf("WARNING: Your kernel does not support OOM notifications: %s", err)
		}
	} else {
		log.Warnf("Failed to get container state, oom notify will not work: %s", err)
	}
	// wait for the container to exit.
	execOutput := <-execOutputChan

	return execdriver.ExitStatus{ExitCode: execOutput.exitCode, OOMKilled: oomKill}, execOutput.err
}
Example #11
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {

	var (
		term execdriver.Terminal
		err  error
	)

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		Owner:                   defaultOwner,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
		ProcessorWeight:         c.Resources.CPUShares,
		HostName:                c.Hostname,
	}

	cu.HvPartition = c.HvPartition

	if cu.HvPartition {
		cu.SandboxPath = filepath.Dir(c.LayerFolder)
	} else {
		cu.VolumePath = c.Rootfs
		cu.LayerFolderPath = c.LayerFolder
	}

	for _, layerPath := range c.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]mappedDir, len(c.Mounts))
	for i, mount := range c.Mounts {
		mds[i] = mappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      !mount.Writable}
	}
	cu.MappedDirectories = mds

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.

	if c.Network.Interface != nil {

		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}

			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}

			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol:     proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	// TODO Windows TP5 timeframe. Remove when TP4 is no longer supported.
	// The following is a workaround for Windows TP4, which has a networking
	// bug that fairly frequently returns an error. Back off and retry.
	maxAttempts := 1
	if TP4RetryHack {
		maxAttempts = 5
	}
	i := 0
	for i < maxAttempts {
		i++
		err = hcsshim.CreateComputeSystem(c.ID, configuration)
		if err != nil {
			if TP4RetryHack {
				if !strings.Contains(err.Error(), `Win32 API call returned error r1=0x800401f3`) && // Invalid class string
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070490`) && // Element not found
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070002`) && // The system cannot find the file specified
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x800704c6`) && // The network is not present or not started
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x800700a1`) { // The specified path is invalid
					logrus.Debugln("Failed to create temporary container ", err)
					return execdriver.ExitStatus{ExitCode: -1}, err
				}
				logrus.Warnf("Invoking Windows TP4 retry hack (%d of %d)", i, maxAttempts-1)
				time.Sleep(50 * time.Millisecond)
			}
		} else {
			break
		}
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if forceKill {
			logrus.Debugf("Forcibly terminating container %s", c.ID)
			if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				if errno != hcsshim.Win32SystemShutdownIsInProgress &&
					errno != hcsshim.Win32SpecifiedPathInvalid &&
					errno != hcsshim.Win32SystemCannotFindThePathSpecified {
					logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err)
				}
			}
		}
	}()

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	createProcessParms.CommandLine, err = createCommandLine(&c.ProcessConfig, c.ArgsEscaped)

	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the command running in the container.
	pid, stdin, stdout, stderr, _, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	//Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	var (
		exitCode int32
		errno    uint32
	)
	exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite)
	if err != nil {
		if errno != hcsshim.Win32PipeHasBeenEnded {
			logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err)
		}
		// Do NOT return err here as the container would have
		// started, otherwise docker will deadlock. It's perfectly legitimate
		// for WaitForProcessInComputeSystem to fail in situations such
		// as the container being killed on another thread.
		return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
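setupEnvironmentVariables, used throughout the Windows examples, is also not shown. A likely shape for it (an assumption, not the actual helper) is a straightforward conversion from Docker's KEY=VALUE slice into the map form the HCS process parameters appear to expect:

package windowsexec

import "strings"

// setupEnvironmentVariables splits each KEY=VALUE entry and collects the
// result into a map keyed by variable name.
func setupEnvironmentVariables(env []string) map[string]string {
	out := make(map[string]string, len(env))
	for _, kv := range env {
		if i := strings.Index(kv, "="); i > 0 {
			out[kv[:i]] = kv[i+1:]
		}
	}
	return out
}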
Example #12
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {

	var (
		term execdriver.Terminal
		err  error
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		Owner:                   defaultOwner,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
		ProcessorWeight:         c.Resources.CPUShares,
		HostName:                c.Hostname,
	}

	for i := 0; i < len(c.LayerPaths); i++ {
		_, filename := filepath.Split(c.LayerPaths[i])
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID:   g.ToString(),
			Path: c.LayerPaths[i],
		})
	}

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.

	if c.Network.Interface != nil {

		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}

			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}

			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol:     proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if forceKill {
			logrus.Debugf("Forcibly terminating container %s", c.ID)
			if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				if errno != hcsshim.Win32SystemShutdownIsInProgress &&
					errno != hcsshim.Win32SpecifiedPathInvalid &&
					errno != hcsshim.Win32SystemCannotFindThePathSpecified {
					logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err)
				}
			}
		}
	}()

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + syscall.EscapeArg(arg)
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	//Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	var (
		exitCode int32
		errno    uint32
	)
	exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite)
	if err != nil {
		if errno != hcsshim.Win32PipeHasBeenEnded {
			logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err)
		}
		// Do NOT return err here as the container would have
		// started, otherwise docker will deadlock. It's perfectly legitimate
		// for WaitForProcessInComputeSystem to fail in situations such
		// as the container being killed on another thread.
		return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
Example #13
func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {

	var (
		term                           execdriver.Terminal
		err                            error
		inListen, outListen, errListen *npipe.PipeListener
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
	}

	for i := 0; i < len(c.LayerPaths); i++ {
		cu.Layers = append(cu.Layers, layer{
			Id:   hcsshim.NewGUID(c.LayerPaths[i]).ToString(),
			Path: c.LayerPaths[i],
		})
	}

	if c.Network.Interface != nil {
		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				EnableNat:   false,
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}

		logrus.Debugf("Virtual switch '%s', mac='%s'", c.Network.Interface.Bridge, c.Network.Interface.MacAddress)

		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container

		if terminateMode {
			logrus.Debugf("Terminating container %s", c.ID)
			if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
			}
		}
	}()

	// We use a different pipe name between real and dummy mode in the HCS
	var serverPipeFormat, clientPipeFormat string
	if dummyMode {
		clientPipeFormat = `\\.\pipe\docker-run-%[1]s-%[2]s`
		serverPipeFormat = clientPipeFormat
	} else {
		clientPipeFormat = `\\.\pipe\docker-run-%[2]s`
		serverPipeFormat = `\\.\Containers\%[1]s\Device\NamedPipe\docker-run-%[2]s`
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// Connect stdin
	if pipes.Stdin != nil {
		stdInPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdin")
		createProcessParms.StdInPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdin")

		// Listen on the named pipe
		inListen, err = npipe.Listen(stdInPipe)
		if err != nil {
			logrus.Errorf("stdin failed to listen on %s err=%s", stdInPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer inListen.Close()

		// Launch a goroutine to do the accept. We do this so that we can
		// cause an otherwise blocking goroutine to gracefully close when
		// the caller (us) closes the listener
		go stdinAccept(inListen, stdInPipe, pipes.Stdin)
	}

	// Connect stdout
	stdOutPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stdout")
	createProcessParms.StdOutPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stdout")

	outListen, err = npipe.Listen(stdOutPipe)
	if err != nil {
		logrus.Errorf("stdout failed to listen on %s err=%s", stdOutPipe, err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer outListen.Close()
	go stdouterrAccept(outListen, stdOutPipe, pipes.Stdout)

	// No stderr on TTY.
	if !c.ProcessConfig.Tty {
		// Connect stderr
		stdErrPipe := fmt.Sprintf(serverPipeFormat, c.ID, "stderr")
		createProcessParms.StdErrPipe = fmt.Sprintf(clientPipeFormat, c.ID, "stderr")
		errListen, err = npipe.Listen(stdErrPipe)
		if err != nil {
			logrus.Errorf("stderr failed to listen on %s err=%s", stdErrPipe, err)
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		defer errListen.Close()
		go stdouterrAccept(errListen, stdErrPipe, pipes.Stderr)
	}

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var pid uint32
	pid, err = hcsshim.CreateProcessInComputeSystem(c.ID, createProcessParms)

	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	//Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}