// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
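The "closed channel for OOM" trick above relies on a Go guarantee: a receive from a closed channel never blocks and reports ok == false. A minimal, self-contained sketch (illustration only, not part of the driver) of what a Start hook observes when handed such a channel:

// Illustration only: semantics of the pre-closed OOM channel passed to hooks.Start.
package main

import "fmt"

func main() {
	chOOM := make(chan struct{})
	close(chOOM)

	// Receiving from a closed channel returns immediately with the zero value
	// and ok == false, so callers read this as "not OOM killed".
	_, oomKilled := <-chOOM
	fmt.Println("OOM killed:", oomKilled) // OOM killed: false
}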
// Run uses the execution driver to run a given container
func (daemon *Daemon) Run(c *container.Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
		return daemon.setNetworkNamespaceKey(c.ID, pid)
	})
	return daemon.execDriver.Run(c.Command, pipes, hooks)
}
func (daemon *Daemon) run(c *Container, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (execdriver.ExitStatus, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	hooks.PreStart = append(hooks.PreStart, func(processConfig *execdriver.ProcessConfig, pid int) error {
		return c.setNetworkNamespaceKey(pid)
	})
	return daemon.execDriver.Run(c.command, pipes, hooks)
}
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		hooks.Start(&c.ProcessConfig, pid)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: c.ProcessConfig.User,
	}

	if err := setupPipes(container, &c.ProcessConfig, p, pipes); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		cont.Destroy()
		d.cleanContainer(c.ID)
	}()

	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	oom := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}

	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}

	cont.Destroy()
	_, oomKill := <-oom
	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}
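The waitF switch above works around p.Wait blocking on inherited stdio when the container shares the host PID namespace. The sketch below is a simplified, hypothetical take on the waitInPIDHost idea (the real helper also reaps any remaining container processes, which is omitted here): wait on the init PID directly, then let libcontainer clean up its own bookkeeping.

// Hypothetical sketch of the waitInPIDHost idea, assuming "os" and the
// libcontainer package are imported; not the actual implementation.
func waitInPIDHostSketch(p *libcontainer.Process) func() (*os.ProcessState, error) {
	return func() (*os.ProcessState, error) {
		pid, err := p.Pid()
		if err != nil {
			return nil, err
		}
		proc, err := os.FindProcess(pid) // always succeeds on Unix
		if err != nil {
			return nil, err
		}
		state, err := proc.Wait() // returns as soon as the init process exits,
		p.Wait()                  // then let libcontainer reap its exec.Cmd
		return state, err
	}
}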
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
		errno    uint32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process
	// Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// Create the commandline for the process
	// Note NOT c.ProcessConfig
	createProcessParms.CommandLine, err = createCommandLine(processConfig, false)
	if err != nil {
		return -1, err
	}

	// Start the command running in the container.
	pid, stdin, stdout, stderr, rc, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		// TODO Windows: TP4 Workaround. In Hyper-V containers, there is a limitation
		// of one exec per container. This should be fixed post TP4. CreateProcessInComputeSystem
		// will return a specific error which we handle here to give a good error message
		// back to the user instead of an unactionable "An invalid argument was supplied"
		if rc == hcsshim.Win32InvalidArgument {
			return -1, fmt.Errorf("The limit of docker execs per Hyper-V container has been exceeded")
		}
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}

	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err  error
	)

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		Owner:                   defaultOwner,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
		ProcessorWeight:         c.Resources.CPUShares,
		HostName:                c.Hostname,
	}

	cu.HvPartition = c.HvPartition

	if cu.HvPartition {
		cu.SandboxPath = filepath.Dir(c.LayerFolder)
	} else {
		cu.VolumePath = c.Rootfs
		cu.LayerFolderPath = c.LayerFolder
	}

	for _, layerPath := range c.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]mappedDir, len(c.Mounts))
	for i, mount := range c.Mounts {
		mds[i] = mappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      !mount.Writable}
	}
	cu.MappedDirectories = mds

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.

	if c.Network.Interface != nil {
		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}
			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}
			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol:     proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	// TODO Windows TP5 timeframe. Remove when TP4 is no longer supported.
	// The following is a workaround for Windows TP4, which has a networking
	// bug that fairly frequently returns an error. Back off and retry.
	maxAttempts := 1
	if TP4RetryHack {
		maxAttempts = 5
	}
	i := 0
	for i < maxAttempts {
		i++
		err = hcsshim.CreateComputeSystem(c.ID, configuration)
		if err != nil {
			if TP4RetryHack {
				if !strings.Contains(err.Error(), `Win32 API call returned error r1=0x800401f3`) && // Invalid class string
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070490`) && // Element not found
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070002`) && // The system cannot find the file specified
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x800704c6`) && // The network is not present or not started
					!strings.Contains(err.Error(), `Win32 API call returned error r1=0x800700a1`) { // The specified path is invalid
					logrus.Debugln("Failed to create temporary container ", err)
					return execdriver.ExitStatus{ExitCode: -1}, err
				}
				logrus.Warnf("Invoking Windows TP4 retry hack (%d of %d)", i, maxAttempts-1)
				time.Sleep(50 * time.Millisecond)
			}
		} else {
			break
		}
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if forceKill {
			logrus.Debugf("Forcibly terminating container %s", c.ID)
			if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil {
				if errno != hcsshim.Win32SystemShutdownIsInProgress &&
					errno != hcsshim.Win32SpecifiedPathInvalid &&
					errno != hcsshim.Win32SystemCannotFindThePathSpecified {
					logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err)
				}
			}
		}
	}()

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	createProcessParms.CommandLine, err = createCommandLine(&c.ProcessConfig, c.ArgsEscaped)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the command running in the container.
	pid, stdin, stdout, stderr, _, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	var (
		exitCode int32
		errno    uint32
	)
	exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite)
	if err != nil {
		if errno != hcsshim.Win32PipeHasBeenEnded {
			logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err)
		}
		// Do NOT return err here as the container would have
		// started, otherwise docker will deadlock. It's perfectly legitimate
		// for WaitForProcessInComputeSystem to fail in situations such
		// as the container being killed on another thread.
		return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
// Run implements the exec driver Driver interface,
// it calls libcontainer APIs to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	destroyed := false
	var err error
	c.TmpDir, err = ioutil.TempDir("", c.ID)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer os.RemoveAll(c.TmpDir)

	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c, hooks)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: c.ProcessConfig.User,
	}

	wg := sync.WaitGroup{}
	writers, err := setupPipes(container, &c.ProcessConfig, p, pipes, &wg)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		if !destroyed {
			cont.Destroy()
		}
		d.cleanContainer(c.ID)
	}()

	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}

	// 'oom' is used to emit 'oom' events to the eventstream, 'oomKilled' is used
	// to set the 'OOMKilled' flag in state
	oom := notifyOnOOM(cont)
	oomKilled := notifyOnOOM(cont)
	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		hooks.Start(&c.ProcessConfig, pid, oom)
	}

	waitF := p.Wait
	if nss := cont.Config().Namespaces; !nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		execErr, ok := err.(*exec.ExitError)
		if !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		ps = execErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()

	cont.Destroy()
	destroyed = true
	// oomKilled will have an oom event if any process within the container was
	// OOM killed at any time, not only if the init process OOMed.
	//
	// Perhaps we only want the OOMKilled flag to be set if the OOM
	// resulted in a container death, but there isn't a good way to do this
	// because the kernel's cgroup oom notification does not provide information
	// such as the PID. This could be heuristically done by checking that the OOM
	// happened within some very small time slice for the container dying (and
	// optionally exit-code 137), but I don't think the cgroup oom notification
	// can be used to reliably determine this
	//
	// Even if there were multiple OOMs, it's sufficient to read one value
	// because libcontainer's oom notify will discard the channel after the
	// cgroup is destroyed
	_, oomKill := <-oomKilled
	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}
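Both libcontainer-based variants convert the final process state with utils.ExitStatus(ps.Sys().(syscall.WaitStatus)). Below is a hedged sketch of what that helper presumably does, assuming it simply unwraps the standard syscall.WaitStatus accessors on Linux; exitStatusSketch is our name for illustration, not Docker's utils package.

// Assumed behaviour of utils.ExitStatus on Linux; illustration only.
package main

import (
	"fmt"
	"syscall"
)

func exitStatusSketch(ws syscall.WaitStatus) int {
	if ws.Signaled() {
		// Shell convention: death by signal maps to 128 + signal number.
		return 128 + int(ws.Signal())
	}
	return ws.ExitStatus()
}

func main() {
	// 512 == 2 << 8: a clean exit with status 2 as encoded by wait(2) on Linux.
	fmt.Println(exitStatusSketch(syscall.WaitStatus(512))) // 2
}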
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
		errno    uint32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process
	// Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if hooks.Start != nil {
		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, int(pid), chOOM)
	}

	if exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite); err != nil {
		if errno == hcsshim.Win32PipeHasBeenEnded {
			logrus.Debugf("Exiting Run() after WaitForProcessInComputeSystem failed with recognised error 0x%X", errno)
			return hcsshim.WaitErrExecFailed, nil
		}
		logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): 0x%X %s", errno, err)
		return -1, err
	}

	logrus.Debugln("Exiting Run()", c.ID)
	return int(exitCode), nil
}
// Run implements the exec driver Driver interface
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err  error
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		Owner:                   defaultOwner,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
	}

	for i := 0; i < len(c.LayerPaths); i++ {
		_, filename := filepath.Split(c.LayerPaths[i])
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID:   g.ToString(),
			Path: c.LayerPaths[i],
		})
	}

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.

	if c.Network.Interface != nil {
		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}
			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}
			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs,
					portBinding{ExternalPort: ePort,
						InternalPort: iPort,
						Protocol:     proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	defer func() {
		// Stop the container
		if terminateMode {
			logrus.Debugf("Terminating container %s", c.ID)
			if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if it fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
			}
		}
	}()

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	if hooks.Start != nil {
		hooks.Start(&c.ProcessConfig, int(pid))
	}

	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
// Run implements the exec driver Driver interface,
// it calls 'exec.Cmd' to launch lxc commands to run a container.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) {
	var (
		term     execdriver.Terminal
		err      error
		dataPath = d.containerDir(c.ID)
	)

	if c.Network == nil || (c.Network.NamespacePath == "" && c.Network.ContainerID == "") {
		return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network")
	}

	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
		"-q",
	}

	// From lxc>=1.1 the default behavior is to daemonize containers after start
	lxcVersion := version.Version(d.version())
	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
		params = append(params, "-F")
	}

	proc := &os.Process{}
	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	} else {
		proc, err = setupNetNs(c.Network.NamespacePath)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}

		pidStr := fmt.Sprintf("%d", proc.Pid)
		params = append(params,
			"--share-net", pidStr)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}

	params = append(params,
		"--",
		c.InitPath,
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")
		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				stringutils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	logrus.Debugf("lxc params %s", params)
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		killNetNsProc(proc)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		killNetNsProc(proc)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	terminate := func(terr error) (execdriver.ExitStatus, error) {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return execdriver.ExitStatus{ExitCode: -1}, terr
	}

	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		killNetNsProc(proc)
		return terminate(err)
	}
	killNetNsProc(proc)

	cgroupPaths, err := cgroupPaths(c.ID)
	if err != nil {
		return terminate(err)
	}

	state := &libcontainer.State{
		InitProcessPid: pid,
		CgroupPaths:    cgroupPaths,
	}

	f, err := os.Create(filepath.Join(dataPath, "state.json"))
	if err != nil {
		return terminate(err)
	}
	defer f.Close()

	if err := json.NewEncoder(f).Encode(state); err != nil {
		return terminate(err)
	}

	c.ContainerPid = pid

	oomKill := false
	oomKillNotification, err := notifyOnOOM(cgroupPaths)

	if hooks.Start != nil {
		logrus.Debugf("Invoking startCallback")
		hooks.Start(&c.ProcessConfig, pid, oomKillNotification)
	}

	<-waitLock
	exitCode := getExitCode(c)

	if err == nil {
		_, oomKill = <-oomKillNotification
		logrus.Debugf("oomKill error: %v, waitErr: %v", oomKill, waitErr)
	} else {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
	}

	// check oom error
	if oomKill {
		exitCode = 137
	}

	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}
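The hard-coded exitCode = 137 above follows the same convention as the exit-status sketch earlier: 128 plus the terminating signal, and an OOM kill arrives as SIGKILL (9). A one-line check, for illustration only:

// Illustration only: the origin of exit code 137 for OOM-killed containers.
package main

import (
	"fmt"
	"syscall"
)

func main() {
	fmt.Println(128 + int(syscall.SIGKILL)) // 137
}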
// Exec implements the exec driver Driver interface,
// it calls libcontainer APIs to execute a container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	user := processConfig.User
	if c.RemappedRoot.UID != 0 && user == "" {
		// if user namespaces are enabled, set user explicitly so uid/gid is set to 0
		// otherwise we end up with the overflow id and no permissions (65534)
		user = "0"
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: user,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	wg := sync.WaitGroup{}
	writers, err := setupPipes(&config, processConfig, p, pipes, &wg)
	if err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}
	// close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
// Exec implements the exec driver Driver interface.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	var (
		term     execdriver.Terminal
		err      error
		exitCode int32
	)

	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("Exec - No active container exists with ID %s", c.ID)
	}

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   processConfig.Tty, // Note NOT c.ProcessConfig.Tty
		WorkingDirectory: c.WorkingDir,
	}

	// Configure the environment for the process
	// Note NOT c.ProcessConfig.Env
	createProcessParms.Environment = setupEnvironmentVariables(processConfig.Env)

	// While this should get caught earlier, just in case, validate that we
	// have something to run.
	if processConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return -1, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = processConfig.Entrypoint
	for _, arg := range processConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugln("commandLine: ", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !processConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return -1, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	// Note NOT c.ProcessConfig.Tty
	if processConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	processConfig.Terminal = term

	// Invoke the start callback
	if hooks.Start != nil {
		hooks.Start(&c.ProcessConfig, int(pid))
	}

	if exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid); err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return -1, err
	}

	// TODO Windows - Do something with this exit code
	logrus.Debugln("Exiting Run() with ExitCode 0", c.ID)
	return int(exitCode), nil
}