// Create is the entrypoint to create a container from a spec, and if successfully // created, start it too. func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error { logrus.Debugln("LCD client.Create() with spec", spec) cu := &containerInit{ SystemType: "Container", Name: containerID, Owner: defaultOwner, VolumePath: spec.Root.Path, IgnoreFlushesDuringBoot: spec.Windows.FirstStart, LayerFolderPath: spec.Windows.LayerFolder, HostName: spec.Hostname, } if spec.Windows.Networking != nil { cu.EndpointList = spec.Windows.Networking.EndpointList } if spec.Windows.Resources != nil { if spec.Windows.Resources.CPU != nil { if spec.Windows.Resources.CPU.Shares != nil { cu.ProcessorWeight = *spec.Windows.Resources.CPU.Shares } if spec.Windows.Resources.CPU.Percent != nil { cu.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000 } } if spec.Windows.Resources.Memory != nil { if spec.Windows.Resources.Memory.Limit != nil { cu.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024 } } if spec.Windows.Resources.Storage != nil { if spec.Windows.Resources.Storage.Bps != nil { cu.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps } if spec.Windows.Resources.Storage.Iops != nil { cu.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops } if spec.Windows.Resources.Storage.SandboxSize != nil { cu.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize } } } if spec.Windows.HvRuntime != nil { cu.HvPartition = true cu.HvRuntime = &hvRuntime{ ImagePath: spec.Windows.HvRuntime.ImagePath, } } for _, option := range options { if s, ok := option.(*ServicingOption); ok { cu.Servicing = s.IsServicing break } } if cu.HvPartition { cu.SandboxPath = filepath.Dir(spec.Windows.LayerFolder) } else { cu.VolumePath = spec.Root.Path cu.LayerFolderPath = spec.Windows.LayerFolder } for _, layerPath := range spec.Windows.LayerPaths { _, filename := filepath.Split(layerPath) g, 
err := hcsshim.NameToGuid(filename) if err != nil { return err } cu.Layers = append(cu.Layers, layer{ ID: g.ToString(), Path: layerPath, }) } // Add the mounts (volumes, bind mounts etc) to the structure mds := make([]mappedDir, len(spec.Mounts)) for i, mount := range spec.Mounts { mds[i] = mappedDir{ HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: mount.Readonly} } cu.MappedDirectories = mds configurationb, err := json.Marshal(cu) if err != nil { return err } // Create the compute system configuration := string(configurationb) if err := hcsshim.CreateComputeSystem(containerID, configuration); err != nil { return err } // Construct a container object for calling start on it. container := &container{ containerCommon: containerCommon{ process: process{ processCommon: processCommon{ containerID: containerID, client: clnt, friendlyName: InitFriendlyName, }, commandLine: strings.Join(spec.Process.Args, " "), }, processes: make(map[string]*process), }, ociSpec: spec, } container.options = options for _, option := range options { if err := option.Apply(container); err != nil { logrus.Error(err) } } // Call start, and if it fails, delete the container from our // internal structure, and also keep HCS in sync by deleting the // container there. logrus.Debugf("Create() id=%s, Calling start()", containerID) if err := container.start(); err != nil { clnt.deleteContainer(containerID) return err } logrus.Debugf("Create() id=%s completed successfully", containerID) return nil }
// Run implements the exec driver Driver interface func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, hooks execdriver.Hooks) (execdriver.ExitStatus, error) { var ( term execdriver.Terminal err error ) cu := &containerInit{ SystemType: "Container", Name: c.ID, Owner: defaultOwner, IsDummy: dummyMode, VolumePath: c.Rootfs, IgnoreFlushesDuringBoot: c.FirstStart, LayerFolderPath: c.LayerFolder, ProcessorWeight: c.Resources.CPUShares, HostName: c.Hostname, } cu.HvPartition = c.HvPartition if cu.HvPartition { cu.SandboxPath = filepath.Dir(c.LayerFolder) } else { cu.VolumePath = c.Rootfs cu.LayerFolderPath = c.LayerFolder } for _, layerPath := range c.LayerPaths { _, filename := filepath.Split(layerPath) g, err := hcsshim.NameToGuid(filename) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } cu.Layers = append(cu.Layers, layer{ ID: g.ToString(), Path: layerPath, }) } // Add the mounts (volumes, bind mounts etc) to the structure mds := make([]mappedDir, len(c.Mounts)) for i, mount := range c.Mounts { mds[i] = mappedDir{ HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: !mount.Writable} } cu.MappedDirectories = mds // TODO Windows. At some point, when there is CLI on docker run to // enable the IP Address of the container to be passed into docker run, // the IP Address needs to be wired through to HCS in the JSON. It // would be present in c.Network.Interface.IPAddress. See matching // TODO in daemon\container_windows.go, function populateCommand. if c.Network.Interface != nil { var pbs []portBinding // Enumerate through the port bindings specified by the user and convert // them into the internal structure matching the JSON blob that can be // understood by the HCS. 
for i, v := range c.Network.Interface.PortBindings { proto := strings.ToUpper(i.Proto()) if proto != "TCP" && proto != "UDP" { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto()) } if len(v) > 1 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings") } for _, v2 := range v { var ( iPort, ePort int err error ) if len(v2.HostIP) != 0 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings") } if ePort, err = strconv.Atoi(v2.HostPort); err != nil { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err) } if iPort, err = strconv.Atoi(i.Port()); err != nil { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err) } if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 { return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range") } pbs = append(pbs, portBinding{ExternalPort: ePort, InternalPort: iPort, Protocol: proto}) } } // TODO Windows: TP3 workaround. Allow the user to override the name of // the Container NAT device through an environment variable. This will // ultimately be a global daemon parameter on Windows, similar to -b // for the name of the virtual switch (aka bridge). cn := os.Getenv("DOCKER_CONTAINER_NAT") if len(cn) == 0 { cn = defaultContainerNAT } dev := device{ DeviceType: "Network", Connection: &networkConnection{ NetworkName: c.Network.Interface.Bridge, // TODO Windows: Fixme, next line. Needs HCS fix. 
EnableNat: false, Nat: natSettings{ Name: cn, PortBindings: pbs, }, }, } if c.Network.Interface.MacAddress != "" { windowsStyleMAC := strings.Replace( c.Network.Interface.MacAddress, ":", "-", -1) dev.Settings = networkSettings{ MacAddress: windowsStyleMAC, } } cu.Devices = append(cu.Devices, dev) } else { logrus.Debugln("No network interface") } configurationb, err := json.Marshal(cu) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } configuration := string(configurationb) // TODO Windows TP5 timeframe. Remove when TP4 is no longer supported. // The following a workaround for Windows TP4 which has a networking // bug which fairly frequently returns an error. Back off and retry. maxAttempts := 1 if TP4RetryHack { maxAttempts = 5 } i := 0 for i < maxAttempts { i++ err = hcsshim.CreateComputeSystem(c.ID, configuration) if err != nil { if TP4RetryHack { if !strings.Contains(err.Error(), `Win32 API call returned error r1=0x800401f3`) && // Invalid class string !strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070490`) && // Element not found !strings.Contains(err.Error(), `Win32 API call returned error r1=0x80070002`) && // The system cannot find the file specified !strings.Contains(err.Error(), `Win32 API call returned error r1=0x800704c6`) && // The network is not present or not started !strings.Contains(err.Error(), `Win32 API call returned error r1=0x800700a1`) { // The specified path is invalid logrus.Debugln("Failed to create temporary container ", err) return execdriver.ExitStatus{ExitCode: -1}, err } logrus.Warnf("Invoking Windows TP4 retry hack (%d of %d)", i, maxAttempts-1) time.Sleep(50 * time.Millisecond) } } else { break } } // Start the container logrus.Debugln("Starting container ", c.ID) err = hcsshim.StartComputeSystem(c.ID) if err != nil { logrus.Errorf("Failed to start compute system: %s", err) return execdriver.ExitStatus{ExitCode: -1}, err } defer func() { // Stop the container if forceKill { 
logrus.Debugf("Forcibly terminating container %s", c.ID) if errno, err := hcsshim.TerminateComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil { logrus.Warnf("Ignoring error from TerminateComputeSystem 0x%X %s", errno, err) } } else { logrus.Debugf("Shutting down container %s", c.ID) if errno, err := hcsshim.ShutdownComputeSystem(c.ID, hcsshim.TimeoutInfinite, "exec-run-defer"); err != nil { if errno != hcsshim.Win32SystemShutdownIsInProgress && errno != hcsshim.Win32SpecifiedPathInvalid && errno != hcsshim.Win32SystemCannotFindThePathSpecified { logrus.Warnf("Ignoring error from ShutdownComputeSystem 0x%X %s", errno, err) } } } }() createProcessParms := hcsshim.CreateProcessParams{ EmulateConsole: c.ProcessConfig.Tty, WorkingDirectory: c.WorkingDir, ConsoleSize: c.ProcessConfig.ConsoleSize, } // Configure the environment for the process createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env) createProcessParms.CommandLine, err = createCommandLine(&c.ProcessConfig, c.ArgsEscaped) if err != nil { return execdriver.ExitStatus{ExitCode: -1}, err } // Start the command running in the container. pid, stdin, stdout, stderr, _, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms) if err != nil { logrus.Errorf("CreateProcessInComputeSystem() failed %s", err) return execdriver.ExitStatus{ExitCode: -1}, err } // Now that the process has been launched, begin copying data to and from // the named pipes for the std handles. setupPipes(stdin, stdout, stderr, pipes) //Save the PID as we'll need this in Kill() logrus.Debugf("PID %d", pid) c.ContainerPid = int(pid) if c.ProcessConfig.Tty { term = NewTtyConsole(c.ID, pid) } else { term = NewStdConsole() } c.ProcessConfig.Terminal = term // Maintain our list of active containers. We'll need this later for exec // and other commands. 
d.Lock() d.activeContainers[c.ID] = &activeContainer{ command: c, } d.Unlock() if hooks.Start != nil { // A closed channel for OOM is returned here as it will be // non-blocking and return the correct result when read. chOOM := make(chan struct{}) close(chOOM) hooks.Start(&c.ProcessConfig, int(pid), chOOM) } var ( exitCode int32 errno uint32 ) exitCode, errno, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid, hcsshim.TimeoutInfinite) if err != nil { if errno != hcsshim.Win32PipeHasBeenEnded { logrus.Warnf("WaitForProcessInComputeSystem failed (container may have been killed): %s", err) } // Do NOT return err here as the container would have // started, otherwise docker will deadlock. It's perfectly legitimate // for WaitForProcessInComputeSystem to fail in situations such // as the container being killed on another thread. return execdriver.ExitStatus{ExitCode: hcsshim.WaitErrExecFailed}, nil } logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID) return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil }
// Create is the entrypoint to create a container from a spec, and if successfully // created, start it too. Table below shows the fields required for HCS JSON calling parameters, // where if not populated, is omitted. // +-----------------+--------------------------------------------+---------------------------------------------------+ // | | Isolation=Process | Isolation=Hyper-V | // +-----------------+--------------------------------------------+---------------------------------------------------+ // | VolumePath | \\?\\Volume{GUIDa} | | // | LayerFolderPath | %root%\windowsfilter\containerID | %root%\windowsfilter\containerID (servicing only) | // | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | // | SandboxPath | | %root%\windowsfilter | // | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | // +-----------------+--------------------------------------------+---------------------------------------------------+ // // Isolation=Process example: // // { // "SystemType": "Container", // "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", // "Owner": "docker", // "IsDummy": false, // "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", // "IgnoreFlushesDuringBoot": true, // "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", // "Layers": [{ // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" // }], // "HostName": "5e0055c814a6", // "MappedDirectories": [], // "HvPartition": false, // "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], // "Servicing": false //} // // Isolation=Hyper-V example: // //{ // "SystemType": "Container", // "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", // "Owner": "docker", // "IsDummy": false, // "IgnoreFlushesDuringBoot": 
true, // "Layers": [{ // "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", // "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" // }], // "HostName": "475c2c58933b", // "MappedDirectories": [], // "SandboxPath": "C:\\\\control\\\\windowsfilter", // "HvPartition": true, // "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], // "HvRuntime": { // "ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" // }, // "Servicing": false //} func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, options ...CreateOption) error { clnt.lock(containerID) defer clnt.unlock(containerID) logrus.Debugln("libcontainerd: client.Create() with spec", spec) configuration := &hcsshim.ContainerConfig{ SystemType: "Container", Name: containerID, Owner: defaultOwner, IgnoreFlushesDuringBoot: false, HostName: spec.Hostname, HvPartition: false, } if spec.Windows.Resources != nil { if spec.Windows.Resources.CPU != nil { if spec.Windows.Resources.CPU.Shares != nil { configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) } if spec.Windows.Resources.CPU.Percent != nil { configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000 } } if spec.Windows.Resources.Memory != nil { if spec.Windows.Resources.Memory.Limit != nil { configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 } } if spec.Windows.Resources.Storage != nil { if spec.Windows.Resources.Storage.Bps != nil { configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps } if spec.Windows.Resources.Storage.Iops != nil { configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops } } } var layerOpt *LayerOption for _, option := range options { if s, ok := option.(*ServicingOption); ok { configuration.Servicing = 
s.IsServicing continue } if f, ok := option.(*FlushOption); ok { configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot continue } if h, ok := option.(*HyperVIsolationOption); ok { configuration.HvPartition = h.IsHyperV configuration.SandboxPath = h.SandboxPath continue } if l, ok := option.(*LayerOption); ok { layerOpt = l } if n, ok := option.(*NetworkEndpointsOption); ok { configuration.EndpointList = n.Endpoints configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery continue } if c, ok := option.(*CredentialsOption); ok { configuration.Credentials = c.Credentials continue } } // We must have a layer option with at least one path if layerOpt == nil || layerOpt.LayerPaths == nil { return fmt.Errorf("no layer option or paths were supplied to the runtime") } if configuration.HvPartition { // Find the upper-most utility VM image, since the utility VM does not // use layering in RS1. // TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable. var uvmImagePath string for _, path := range layerOpt.LayerPaths { fullPath := filepath.Join(path, "UtilityVM") _, err := os.Stat(fullPath) if err == nil { uvmImagePath = fullPath break } if !os.IsNotExist(err) { return err } } if uvmImagePath == "" { return errors.New("utility VM image could not be found") } configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} } else { configuration.VolumePath = spec.Root.Path } configuration.LayerFolderPath = layerOpt.LayerFolderPath for _, layerPath := range layerOpt.LayerPaths { _, filename := filepath.Split(layerPath) g, err := hcsshim.NameToGuid(filename) if err != nil { return err } configuration.Layers = append(configuration.Layers, hcsshim.Layer{ ID: g.ToString(), Path: layerPath, }) } // Add the mounts (volumes, bind mounts etc) to the structure mds := make([]hcsshim.MappedDir, len(spec.Mounts)) for i, mount := range spec.Mounts { mds[i] = hcsshim.MappedDir{ HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: 
false, } for _, o := range mount.Options { if strings.ToLower(o) == "ro" { mds[i].ReadOnly = true } } } configuration.MappedDirectories = mds hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) if err != nil { return err } // Construct a container object for calling start on it. container := &container{ containerCommon: containerCommon{ process: process{ processCommon: processCommon{ containerID: containerID, client: clnt, friendlyName: InitFriendlyName, }, commandLine: strings.Join(spec.Process.Args, " "), }, processes: make(map[string]*process), }, ociSpec: spec, hcsContainer: hcsContainer, } container.options = options for _, option := range options { if err := option.Apply(container); err != nil { logrus.Errorf("libcontainerd: %v", err) } } // Call start, and if it fails, delete the container from our // internal structure, start will keep HCS in sync by deleting the // container there. logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) if err := container.start(); err != nil { clnt.deleteContainer(containerID) return err } logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) return nil }
// Create is the entrypoint to create a container from a spec, and if successfully
// created, start it too. The spec is translated into an hcsshim.ContainerConfig
// and handed to the HCS; on start() failure the container is removed from the
// internal map (start keeps HCS itself in sync).
func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error {
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    containerID,
		Owner:                   defaultOwner,
		VolumePath:              spec.Root.Path,
		IgnoreFlushesDuringBoot: spec.Windows.FirstStart,
		LayerFolderPath:         spec.Windows.LayerFolder,
		HostName:                spec.Hostname,
	}

	if spec.Windows.Networking != nil {
		configuration.EndpointList = spec.Windows.Networking.EndpointList
	}

	// Map the optional OCI Windows resource limits onto the HCS fields.
	// Each limit is only set when the caller supplied it.
	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = *spec.Windows.Resources.CPU.Shares
			}
			if spec.Windows.Resources.CPU.Percent != nil {
				configuration.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				// HCS takes megabytes; the spec carries bytes.
				configuration.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	if spec.Windows.HvRuntime != nil {
		configuration.VolumePath = "" // Always empty for Hyper-V containers
		configuration.HvPartition = true
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath: spec.Windows.HvRuntime.ImagePath,
		}

		// Images with build version < 14350 don't support running with clone, but
		// Windows cannot automatically detect this. Explicitly block cloning in this
		// case.
		if build := buildFromVersion(spec.Platform.OSVersion); build > 0 && build < 14350 {
			configuration.HvRuntime.SkipTemplate = true
		}
	}

	if configuration.HvPartition {
		configuration.SandboxPath = filepath.Dir(spec.Windows.LayerFolder)
	} else {
		configuration.VolumePath = spec.Root.Path
		configuration.LayerFolderPath = spec.Windows.LayerFolder
	}

	// Only the first ServicingOption is honoured, hence the break.
	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			break
		}
	}

	// Each layer is identified to HCS by a GUID derived from its folder name.
	for _, layerPath := range spec.Windows.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts etc) to the structure
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      mount.Readonly}
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
				commandLine: strings.Join(spec.Process.Args, " "),
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			// Option application failures are logged but not fatal.
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our
	// internal structure, start will keep HCS in sync by deleting the
	// container there.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil
}
// Run implements the exec driver Driver interface. It validates the requested
// options, builds a containerInit JSON document from the command, creates and
// starts an HCS compute system, launches the entrypoint process inside it,
// wires up the std pipes, invokes the start callback, and blocks until the
// process exits. A deferred handler shuts down (or terminates) the compute
// system on return.
func (d *Driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	var (
		term execdriver.Terminal
		err  error
	)

	// Make sure the client isn't asking for options which aren't supported
	err = checkSupportedOptions(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	cu := &containerInit{
		SystemType:              "Container",
		Name:                    c.ID,
		Owner:                   defaultOwner,
		IsDummy:                 dummyMode,
		VolumePath:              c.Rootfs,
		IgnoreFlushesDuringBoot: c.FirstStart,
		LayerFolderPath:         c.LayerFolder,
	}

	// Each layer is identified to HCS by a GUID derived from its folder name.
	for i := 0; i < len(c.LayerPaths); i++ {
		_, filename := filepath.Split(c.LayerPaths[i])
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cu.Layers = append(cu.Layers, layer{
			ID:   g.ToString(),
			Path: c.LayerPaths[i],
		})
	}

	// TODO Windows. At some point, when there is CLI on docker run to
	// enable the IP Address of the container to be passed into docker run,
	// the IP Address needs to be wired through to HCS in the JSON. It
	// would be present in c.Network.Interface.IPAddress. See matching
	// TODO in daemon\container_windows.go, function populateCommand.
	if c.Network.Interface != nil {
		var pbs []portBinding

		// Enumerate through the port bindings specified by the user and convert
		// them into the internal structure matching the JSON blob that can be
		// understood by the HCS.
		for i, v := range c.Network.Interface.PortBindings {
			proto := strings.ToUpper(i.Proto())
			if proto != "TCP" && proto != "UDP" {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid protocol %s", i.Proto())
			}
			if len(v) > 1 {
				return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support more than one host port in NAT settings")
			}
			for _, v2 := range v {
				var (
					iPort, ePort int
					err          error
				)
				if len(v2.HostIP) != 0 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("Windows does not support host IP addresses in NAT settings")
				}
				if ePort, err = strconv.Atoi(v2.HostPort); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid container port %s: %s", v2.HostPort, err)
				}
				if iPort, err = strconv.Atoi(i.Port()); err != nil {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("invalid internal port %s: %s", i.Port(), err)
				}
				if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 {
					return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("specified NAT port is not in allowed range")
				}
				pbs = append(pbs, portBinding{ExternalPort: ePort, InternalPort: iPort, Protocol: proto})
			}
		}

		// TODO Windows: TP3 workaround. Allow the user to override the name of
		// the Container NAT device through an environment variable. This will
		// ultimately be a global daemon parameter on Windows, similar to -b
		// for the name of the virtual switch (aka bridge).
		cn := os.Getenv("DOCKER_CONTAINER_NAT")
		if len(cn) == 0 {
			cn = defaultContainerNAT
		}

		dev := device{
			DeviceType: "Network",
			Connection: &networkConnection{
				NetworkName: c.Network.Interface.Bridge,
				// TODO Windows: Fixme, next line. Needs HCS fix.
				EnableNat: false,
				Nat: natSettings{
					Name:         cn,
					PortBindings: pbs,
				},
			},
		}

		if c.Network.Interface.MacAddress != "" {
			// HCS expects dash-separated MAC addresses.
			windowsStyleMAC := strings.Replace(
				c.Network.Interface.MacAddress, ":", "-", -1)
			dev.Settings = networkSettings{
				MacAddress: windowsStyleMAC,
			}
		}
		cu.Devices = append(cu.Devices, dev)
	} else {
		logrus.Debugln("No network interface")
	}

	configurationb, err := json.Marshal(cu)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	configuration := string(configurationb)

	err = hcsshim.CreateComputeSystem(c.ID, configuration)
	if err != nil {
		logrus.Debugln("Failed to create temporary container ", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Start the container
	logrus.Debugln("Starting container ", c.ID)
	err = hcsshim.StartComputeSystem(c.ID)
	if err != nil {
		logrus.Errorf("Failed to start compute system: %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	defer func() {
		// Stop the container
		if terminateMode {
			logrus.Debugf("Terminating container %s", c.ID)
			if err := hcsshim.TerminateComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from TerminateComputeSystem %s", err)
			}
		} else {
			logrus.Debugf("Shutting down container %s", c.ID)
			if err := hcsshim.ShutdownComputeSystem(c.ID); err != nil {
				// IMPORTANT: Don't fail if fails to change state. It could already
				// have been stopped through kill().
				// Otherwise, the docker daemon will hang in job wait()
				logrus.Warnf("Ignoring error from ShutdownComputeSystem %s", err)
			}
		}
	}()

	createProcessParms := hcsshim.CreateProcessParams{
		EmulateConsole:   c.ProcessConfig.Tty,
		WorkingDirectory: c.WorkingDir,
		ConsoleSize:      c.ProcessConfig.ConsoleSize,
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(c.ProcessConfig.Env)

	// This should get caught earlier, but just in case - validate that we
	// have something to run
	if c.ProcessConfig.Entrypoint == "" {
		err = errors.New("No entrypoint specified")
		logrus.Error(err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Build the command line of the process
	createProcessParms.CommandLine = c.ProcessConfig.Entrypoint
	for _, arg := range c.ProcessConfig.Arguments {
		logrus.Debugln("appending ", arg)
		createProcessParms.CommandLine += " " + arg
	}
	logrus.Debugf("CommandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	pid, stdin, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(c.ID, pipes.Stdin != nil, true, !c.ProcessConfig.Tty, createProcessParms)
	if err != nil {
		logrus.Errorf("CreateProcessInComputeSystem() failed %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	// Now that the process has been launched, begin copying data to and from
	// the named pipes for the std handles.
	setupPipes(stdin, stdout, stderr, pipes)

	//Save the PID as we'll need this in Kill()
	logrus.Debugf("PID %d", pid)
	c.ContainerPid = int(pid)

	if c.ProcessConfig.Tty {
		term = NewTtyConsole(c.ID, pid)
	} else {
		term = NewStdConsole()
	}
	c.ProcessConfig.Terminal = term

	// Maintain our list of active containers. We'll need this later for exec
	// and other commands.
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		command: c,
	}
	d.Unlock()

	// Invoke the start callback
	if startCallback != nil {
		startCallback(&c.ProcessConfig, int(pid))
	}

	// Block until the init process exits and report its exit code.
	var exitCode int32
	exitCode, err = hcsshim.WaitForProcessInComputeSystem(c.ID, pid)
	if err != nil {
		logrus.Errorf("Failed to WaitForProcessInComputeSystem %s", err)
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	logrus.Debugf("Exiting Run() exitCode %d id=%s", exitCode, c.ID)
	return execdriver.ExitStatus{ExitCode: int(exitCode)}, nil
}
// main is a sample: it creates an HNS endpoint on the "nat" network, builds a
// container on top of the given base layer, runs ping inside it, then tears
// everything down (endpoint, layers, container). panicIf aborts on hard
// failures; warnIf logs and continues during cleanup.
// NOTE(review): the banner raw-string layout below is reconstructed from a
// whitespace-mangled source — confirm against the original sample.
func main() {
	if len(os.Args) != 2 {
		fmt.Print(`
This sample create a new container runs ping and then destroys the container.

Usage:
  sample.exe <base container Id>

To get the base container id for "microsoft/windowsservercore" use the following PS snippet:
  Split-Path -Leaf (docker inspect microsoft/windowsservercore | ConvertFrom-Json).GraphDriver.Data.Dir

`)
		os.Exit(1)
	}

	windowsbaseId := os.Args[1]

	di := hcsshim.DriverInfo{
		HomeDir: homeDir,
		Flavour: filterDriver,
	}

	imgData, err := hcsshim.GetSharedBaseImages()
	panicIf(err)
	fmt.Println(imgData)

	// Find the HNS network named "nat" so an endpoint can be created on it.
	hcsNets, err := hcsshim.HNSListNetworkRequest("GET", "", "")
	panicIf(err)
	fmt.Println(hcsNets)

	virtualNetworkId := ""
	for _, n := range hcsNets {
		if n.Name == "nat" {
			virtualNetworkId = n.Id
		}
	}

	// https://github.com/docker/libnetwork/blob/f9a1590164b878e668eabf889dd79fb6af8eaced/drivers/windows/windows.go#L284
	endpointRequest := hcsshim.HNSEndpoint{
		VirtualNetwork: virtualNetworkId,
	}
	endpointRequestJson, err := json.Marshal(endpointRequest)
	panicIf(err)

	endpoint, err := hcsshim.HNSEndpointRequest("POST", "", string(endpointRequestJson))
	panicIf(err)
	fmt.Println(*endpoint)

	windowsservercorePath, err := hcsshim.GetLayerMountPath(di, windowsbaseId)
	panicIf(err)
	fmt.Println(windowsservercorePath)

	layerChain, err := GetLayerChain(windowsservercorePath)
	panicIf(err)
	fmt.Println(layerChain)

	newContainerId := stringid.GenerateNonCryptoID()

	// Create and activate a fresh writable layer for the new container.
	layerFolderPath, volumeMountPath, err := CreateAndActivateContainerLayer(di, newContainerId, windowsservercorePath)
	panicIf(err)

	containerConfig := hcsshim.ContainerConfig{
		SystemType:              "Container",
		Name:                    newContainerId,
		Owner:                   "Garden",
		LayerFolderPath:         layerFolderPath,
		VolumePath:              volumeMountPath,
		IgnoreFlushesDuringBoot: true,
		EndpointList:            []string{endpoint.Id},
	}

	// Each layer in the chain is identified to HCS by a GUID derived from its name.
	// https://github.com/docker/docker/blob/cf58eb437c4229e876f2d952a228b603a074e584/libcontainerd/client_windows.go#L111-L121
	for _, layerPath := range layerChain {
		id, err := hcsshim.NameToGuid(GetLayerId(layerPath))
		panicIf(err)
		containerConfig.Layers = append(containerConfig.Layers, hcsshim.Layer{
			Path: layerPath,
			ID:   id.ToString(),
		})
	}

	c, err := hcsshim.CreateContainer(newContainerId, &containerConfig)
	panicIf(err)
	fmt.Println(c)

	err = c.Start()
	panicIf(err)

	stats, err := c.Statistics()
	panicIf(err)
	fmt.Println(stats)

	processConfig := hcsshim.ProcessConfig{
		CommandLine:      "ping 127.0.0.1",
		WorkingDirectory: "C:\\",
		//CreateStdErrPipe: true,
		//CreateStdInPipe: true,
		//CreateStdOutPipe: true,
	}

	p, err := c.CreateProcess(&processConfig)
	panicIf(err)
	fmt.Println(p)

	err = p.Wait()
	panicIf(err)

	// Cleanup is best-effort from here on: warn rather than panic.
	err = c.Shutdown()
	warnIf(err)

	err = c.Terminate()
	warnIf(err)

	endpoint, err = hcsshim.HNSEndpointRequest("DELETE", endpoint.Id, "")
	warnIf(err)

	err = hcsshim.UnprepareLayer(di, newContainerId)
	warnIf(err)

	err = hcsshim.DeactivateLayer(di, newContainerId)
	warnIf(err)

	err = hcsshim.DestroyLayer(di, newContainerId)
	warnIf(err)
}
func main() { if len(os.Args) != 2 { fmt.Print(` This sample create a new container runs ping and then destroys the container. Usage: sample.exe <base container Id> To get the base container id for "microsoft/windowsservercore" use the following PS snippet: Split-Path -Leaf (docker inspect microsoft/windowsservercore | ConvertFrom-Json).GraphDriver.Data.Dir `) os.Exit(1) } windowsbaseId := os.Args[1] guid, err := hcsshim.NameToGuid(windowsbaseId) panicIf(err) windowsbaseGuid := guid.ToString() di := hcsshim.DriverInfo{ HomeDir: homeDir, Flavour: filterDriver, } imgData, err := hcsshim.GetSharedBaseImages() panicIf(err) fmt.Println(imgData) windowsservercorePath, err := hcsshim.GetLayerMountPath(di, windowsbaseId) panicIf(err) fmt.Println(windowsservercorePath) newContainerId := stringid.GenerateNonCryptoID() layerFolderPath, volumeMountPath, err := CreateAndActivateContainerLayer(di, newContainerId, windowsservercorePath) panicIf(err) containerConfig := hcsshim.ContainerConfig{ SystemType: "Container", Name: newContainerId, Owner: "Garden", LayerFolderPath: layerFolderPath, VolumePath: volumeMountPath, Layers: []hcsshim.Layer{ hcsshim.Layer{Path: windowsservercorePath, ID: windowsbaseGuid}, }, IgnoreFlushesDuringBoot: true, } c, err := hcsshim.CreateContainer(newContainerId, &containerConfig) panicIf(err) fmt.Println(c) err = c.Start() panicIf(err) stats, err := c.Statistics() panicIf(err) fmt.Println(stats) processConfig := hcsshim.ProcessConfig{ CommandLine: "ping 127.0.0.1", WorkingDirectory: "C:\\", //CreateStdErrPipe: true, //CreateStdInPipe: true, //CreateStdOutPipe: true, } p, err := c.CreateProcess(&processConfig) panicIf(err) fmt.Println(p) err = p.Wait() panicIf(err) err = c.Shutdown() warnIf(err) err = c.Terminate() warnIf(err) err = hcsshim.UnprepareLayer(di, newContainerId) warnIf(err) err = hcsshim.DeactivateLayer(di, newContainerId) warnIf(err) err = hcsshim.DestroyLayer(di, newContainerId) warnIf(err) }
// Create is the entrypoint to create a container from a spec, and if successfully // created, start it too. func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error { logrus.Debugln("LCD client.Create() with spec", spec) cu := &containerInit{ SystemType: "Container", Name: containerID, Owner: defaultOwner, VolumePath: spec.Root.Path, IgnoreFlushesDuringBoot: spec.Windows.FirstStart, LayerFolderPath: spec.Windows.LayerFolder, HostName: spec.Hostname, } if spec.Windows.Networking != nil { cu.EndpointList = spec.Windows.Networking.EndpointList } if spec.Windows.Resources != nil { if spec.Windows.Resources.CPU != nil { if spec.Windows.Resources.CPU.Shares != nil { cu.ProcessorWeight = *spec.Windows.Resources.CPU.Shares } if spec.Windows.Resources.CPU.Percent != nil { cu.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000 } } if spec.Windows.Resources.Memory != nil { if spec.Windows.Resources.Memory.Limit != nil { cu.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024 } } if spec.Windows.Resources.Storage != nil { if spec.Windows.Resources.Storage.Bps != nil { cu.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps } if spec.Windows.Resources.Storage.Iops != nil { cu.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops } if spec.Windows.Resources.Storage.SandboxSize != nil { cu.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize } } } cu.HvPartition = (spec.Windows.HvRuntime != nil) // TODO Windows @jhowardmsft. FIXME post TP5. 
// if spec.Windows.HvRuntime != nil { // if spec.WIndows.HVRuntime.ImagePath != "" { // cu.TBD = spec.Windows.HvRuntime.ImagePath // } // } if cu.HvPartition { cu.SandboxPath = filepath.Dir(spec.Windows.LayerFolder) } else { cu.VolumePath = spec.Root.Path cu.LayerFolderPath = spec.Windows.LayerFolder } for _, layerPath := range spec.Windows.LayerPaths { _, filename := filepath.Split(layerPath) g, err := hcsshim.NameToGuid(filename) if err != nil { return err } cu.Layers = append(cu.Layers, layer{ ID: g.ToString(), Path: layerPath, }) } // Add the mounts (volumes, bind mounts etc) to the structure mds := make([]mappedDir, len(spec.Mounts)) for i, mount := range spec.Mounts { mds[i] = mappedDir{ HostPath: mount.Source, ContainerPath: mount.Destination, ReadOnly: mount.Readonly} } cu.MappedDirectories = mds // TODO Windows: vv START OF TP4 BLOCK OF CODE. REMOVE ONCE TP4 IS NO LONGER SUPPORTED if hcsshim.IsTP4() && spec.Windows.Networking != nil && spec.Windows.Networking.Bridge != "" { // Enumerate through the port bindings specified by the user and convert // them into the internal structure matching the JSON blob that can be // understood by the HCS. 
var pbs []portBinding for i, v := range spec.Windows.Networking.PortBindings { proto := strings.ToUpper(i.Proto()) if proto != "TCP" && proto != "UDP" { return fmt.Errorf("invalid protocol %s", i.Proto()) } if len(v) > 1 { return fmt.Errorf("Windows does not support more than one host port in NAT settings") } for _, v2 := range v { var ( iPort, ePort int err error ) if len(v2.HostIP) != 0 { return fmt.Errorf("Windows does not support host IP addresses in NAT settings") } if ePort, err = strconv.Atoi(v2.HostPort); err != nil { return fmt.Errorf("invalid container port %s: %s", v2.HostPort, err) } if iPort, err = strconv.Atoi(i.Port()); err != nil { return fmt.Errorf("invalid internal port %s: %s", i.Port(), err) } if iPort < 0 || iPort > 65535 || ePort < 0 || ePort > 65535 { return fmt.Errorf("specified NAT port is not in allowed range") } pbs = append(pbs, portBinding{ExternalPort: ePort, InternalPort: iPort, Protocol: proto}) } } dev := device{ DeviceType: "Network", Connection: &networkConnection{ NetworkName: spec.Windows.Networking.Bridge, Nat: natSettings{ Name: defaultContainerNAT, PortBindings: pbs, }, }, } if spec.Windows.Networking.MacAddress != "" { windowsStyleMAC := strings.Replace( spec.Windows.Networking.MacAddress, ":", "-", -1) dev.Settings = networkSettings{ MacAddress: windowsStyleMAC, } } cu.Devices = append(cu.Devices, dev) } else { logrus.Debugln("No network interface") } // TODO Windows: ^^ END OF TP4 BLOCK OF CODE. REMOVE ONCE TP4 IS NO LONGER SUPPORTED configurationb, err := json.Marshal(cu) if err != nil { return err } configuration := string(configurationb) // TODO Windows TP5 timeframe. Remove when TP4 is no longer supported. // The following a workaround for Windows TP4 which has a networking // bug which fairly frequently returns an error. Back off and retry. 
if !hcsshim.IsTP4() { if err := hcsshim.CreateComputeSystem(containerID, configuration); err != nil { return err } } else { maxAttempts := 5 for i := 1; i <= maxAttempts; i++ { err = hcsshim.CreateComputeSystem(containerID, configuration) if err == nil { break } if herr, ok := err.(*hcsshim.HcsError); ok { if herr.Err != syscall.ERROR_NOT_FOUND && // Element not found herr.Err != syscall.ERROR_FILE_NOT_FOUND && // The system cannot find the file specified herr.Err != ErrorNoNetwork && // The network is not present or not started herr.Err != ErrorBadPathname && // The specified path is invalid herr.Err != CoEClassstring && // Invalid class string herr.Err != ErrorInvalidObject { // The object identifier does not represent a valid object logrus.Debugln("Failed to create temporary container ", err) return err } logrus.Warnf("Invoking Windows TP4 retry hack (%d of %d)", i, maxAttempts-1) time.Sleep(50 * time.Millisecond) } } } // Construct a container object for calling start on it. container := &container{ containerCommon: containerCommon{ process: process{ processCommon: processCommon{ containerID: containerID, client: clnt, friendlyName: InitFriendlyName, }, }, processes: make(map[string]*process), }, ociSpec: spec, } container.options = options for _, option := range options { if err := option.Apply(container); err != nil { logrus.Error(err) } } // Call start, and if it fails, delete the container from our // internal structure, and also keep HCS in sync by deleting the // container there. logrus.Debugf("Create() id=%s, Calling start()", containerID) if err := container.start(); err != nil { clnt.deleteContainer(containerID) return err } logrus.Debugf("Create() id=%s completed successfully", containerID) return nil }