func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
	// Fetch the information from containers connected via --link and turn it
	// into environment variables (a []string slice whose elements look like
	// "NAME=xxxx").
	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return err
	}
	// Set the container's root filesystem.
	s.Root = specs.Root{
		Path:     c.BaseFS,
		Readonly: c.HostConfig.ReadonlyRootfs,
	}
	// Set up the working directory with the remapped ownership.
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return err
	}
	cwd := c.Config.WorkingDir
	if len(cwd) == 0 {
		cwd = "/"
	}
	// Set the container process's arguments, working directory, environment
	// variables, terminal, and the hostname.
	s.Process.Args = append([]string{c.Path}, c.Args...)
	s.Process.Cwd = cwd
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.Terminal = c.Config.Tty
	s.Hostname = c.FullHostname()
	return nil
}
func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return err
	}
	s.Root = specs.Root{
		Path:     c.BaseFS,
		Readonly: c.HostConfig.ReadonlyRootfs,
	}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return err
	}
	cwd := c.Config.WorkingDir
	if len(cwd) == 0 {
		cwd = "/"
	}
	s.Process.Args = append([]string{c.Path}, c.Args...)
	s.Process.Cwd = cwd
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.Terminal = c.Config.Tty
	s.Hostname = c.FullHostname()
	return nil
}
func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return err
	}
	s.Root = specs.Root{
		Path:     c.BaseFS,
		Readonly: c.HostConfig.ReadonlyRootfs,
	}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return err
	}
	cwd := c.Config.WorkingDir
	if len(cwd) == 0 {
		cwd = "/"
	}
	s.Process.Args = append([]string{c.Path}, c.Args...)

	// Only add the custom init if it is specified and the container is running
	// in its own private PID namespace. It does not make sense to add it if the
	// container is running in the host namespace or in another container's PID
	// namespace, where we already have an init.
	if c.HostConfig.PidMode.IsPrivate() {
		if (c.HostConfig.Init != nil && *c.HostConfig.Init) ||
			(c.HostConfig.Init == nil && daemon.configStore.Init) {
			s.Process.Args = append([]string{"/dev/init", c.Path}, c.Args...)
			var path string
			if daemon.configStore.InitPath == "" && c.HostConfig.InitPath == "" {
				path, err = exec.LookPath(DefaultInitBinary)
				if err != nil {
					return err
				}
			}
			if daemon.configStore.InitPath != "" {
				path = daemon.configStore.InitPath
			}
			if c.HostConfig.InitPath != "" {
				path = c.HostConfig.InitPath
			}
			s.Mounts = append(s.Mounts, specs.Mount{
				Destination: "/dev/init",
				Type:        "bind",
				Source:      path,
				Options:     []string{"bind", "ro"},
			})
		}
	}
	s.Process.Cwd = cwd
	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
	s.Process.Terminal = c.Config.Tty
	s.Hostname = c.FullHostname()
	return nil
}
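A note on the init handling above: the bind-mount source for /dev/init is resolved with a simple precedence, where a per-container InitPath overrides the daemon-wide InitPath, which in turn overrides a PATH lookup of the default binary. A minimal standalone sketch of that precedence, assuming hypothetical names (resolveInitPath and its arguments are illustrative, not part of the daemon code):

package main

import (
	"fmt"
	"os/exec"
)

// resolveInitPath is a hypothetical helper mirroring the precedence in the
// code above: the container-level path wins, then the daemon-level path,
// then a PATH lookup of the default binary (e.g. "docker-init").
func resolveInitPath(containerPath, daemonPath, defaultBinary string) (string, error) {
	if containerPath != "" {
		return containerPath, nil
	}
	if daemonPath != "" {
		return daemonPath, nil
	}
	// Neither was configured: fall back to searching PATH.
	return exec.LookPath(defaultBinary)
}

func main() {
	path, err := resolveInitPath("", "/usr/local/bin/docker-init", "docker-init")
	if err != nil {
		fmt.Println("init binary not found:", err)
		return
	}
	fmt.Println("using init binary:", path) // /usr/local/bin/docker-init
}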
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig, img *image.Image) error {
	if err := daemon.Mount(container); err != nil {
		return err
	}
	defer daemon.Unmount(container)

	if err := container.SetupWorkingDirectory(); err != nil {
		return err
	}

	for spec := range config.Volumes {
		name := stringid.GenerateNonCryptoID()
		destination := filepath.Clean(spec)

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.IsDestinationMounted(destination) {
			continue
		}
		path, err := container.GetResourcePath(destination)
		if err != nil {
			return err
		}

		stat, err := os.Stat(path)
		if err == nil && !stat.IsDir() {
			return derr.ErrorCodeMountOverFile.WithArgs(path)
		}

		volumeDriver := hostConfig.VolumeDriver
		if destination != "" && img != nil {
			if _, ok := img.ContainerConfig.Volumes[destination]; ok {
				// If no bind mount is specified for this destination, fall
				// back to the local volume driver.
				if _, ok := container.MountPoints[destination]; !ok {
					volumeDriver = volume.DefaultDriverName
				}
			}
		}

		v, err := daemon.volumes.CreateWithRef(name, volumeDriver, container.ID, nil)
		if err != nil {
			return err
		}
		if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
			return err
		}

		container.AddMountPointWithVolume(destination, v, true)
	}
	return daemon.populateVolumes(container)
}
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
	if err := daemon.Mount(container); err != nil {
		return err
	}
	defer daemon.Unmount(container)

	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return err
	}

	for spec := range config.Volumes {
		name := stringid.GenerateNonCryptoID()
		destination := filepath.Clean(spec)

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.IsDestinationMounted(destination) {
			continue
		}
		path, err := container.GetResourcePath(destination)
		if err != nil {
			return err
		}

		stat, err := os.Stat(path)
		if err == nil && !stat.IsDir() {
			return fmt.Errorf("cannot mount volume over existing file, file exists %s", path)
		}

		v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil)
		if err != nil {
			return err
		}
		if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil {
			return err
		}

		container.AddMountPointWithVolume(destination, v, true)
	}
	return daemon.populateVolumes(container)
}
// containerStart prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (daemon *Daemon) containerStart(container *container.Container) (err error) {
	container.Lock()
	defer container.Unlock()

	if container.Running {
		return nil
	}

	if container.RemovalInProgress || container.Dead {
		return derr.ErrorCodeContainerBeingRemoved
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if err != nil {
			container.SetError(err)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode == 0 {
				container.ExitCode = 128
			}
			container.ToDisk()
			daemon.Cleanup(container)
			daemon.LogContainerEvent(container, "die")
		}
	}()

	if err := daemon.conditionalMountOnStart(container); err != nil {
		return err
	}

	// Make sure NetworkMode has an acceptable value. We do this to ensure
	// backwards API compatibility.
	container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig)

	if err := daemon.initializeNetworking(container); err != nil {
		return err
	}
	linkedEnv, err := daemon.setupLinkedContainers(container)
	if err != nil {
		return err
	}
	if err := container.SetupWorkingDirectory(); err != nil {
		return err
	}
	env := container.CreateDaemonEnvironment(linkedEnv)
	if err := daemon.populateCommand(container, env); err != nil {
		return err
	}

	if !container.HostConfig.IpcMode.IsContainer() && !container.HostConfig.IpcMode.IsHost() {
		if err := daemon.setupIpcDirs(container); err != nil {
			return err
		}
	}

	mounts, err := daemon.setupMounts(container)
	if err != nil {
		return err
	}
	mounts = append(mounts, container.IpcMounts()...)
	mounts = append(mounts, container.TmpfsMounts()...)

	container.Command.Mounts = mounts
	if err := daemon.waitForStart(container); err != nil {
		return err
	}
	container.HasBeenStartedBefore = true
	return nil
}
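containerStart relies on a named return value so that a single deferred closure can inspect err on the way out and roll back any partially-completed setup; ExitCode 128 is only written as a generic "failed to start" marker when nothing more specific was recorded. A minimal sketch of that Go pattern in isolation (the setup functions are placeholders, not daemon APIs):

package main

import (
	"errors"
	"fmt"
)

// start sketches the named-return + deferred-cleanup idiom used by
// containerStart: the deferred closure reads the named return value err,
// so a failure in any step below triggers the rollback without each step
// wiring up its own cleanup.
func start() (err error) {
	defer func() {
		if err != nil {
			fmt.Println("start failed, rolling back partial setup:", err)
		}
	}()

	if err := setupNetworking(); err != nil {
		return err
	}
	if err := setupMounts(); err != nil {
		return err
	}
	return nil
}

func setupNetworking() error { return nil }
func setupMounts() error     { return errors.New("mount failed") }

func main() {
	if err := start(); err != nil {
		fmt.Println("container did not start:", err)
	}
}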
func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		s.Mounts = append(s.Mounts, windowsoci.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
			Readonly:    !mount.Writable,
		})
	}

	// In s.Process
	s.Process.Args = append([]string{c.Path}, c.Args...)
	if !c.Config.ArgsEscaped {
		s.Process.Args = escapeArgs(s.Process.Args)
	}
	s.Process.Cwd = c.Config.WorkingDir
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize
	s.Process.Terminal = c.Config.Tty
	s.Process.User.User = c.Config.User

	// In spec.Root
	s.Root.Path = c.BaseFS
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	// In s.Windows
	s.Windows.FirstStart = !c.HasBeenStartedBefore

	// s.Windows.LayerFolder.
	m, err := c.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	s.Windows.LayerFolder = m["dir"]

	// s.Windows.LayerPaths
	var layerPaths []string
	if img.RootFS != nil && (img.RootFS.Type == image.TypeLayers || img.RootFS.Type == image.TypeLayersWithBase) {
		// Get the layer path for each layer.
		start := 1
		if img.RootFS.Type == image.TypeLayersWithBase {
			// Include an empty slice to get the base layer ID.
			start = 0
		}
		max := len(img.RootFS.DiffIDs)
		for i := start; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}
	s.Windows.LayerPaths = layerPaths

	// Are we going to run as a Hyper-V container?
	hv := false
	if c.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hv = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hv = c.HostConfig.Isolation.IsHyperV()
	}
	if hv {
		hvr := &windowsoci.HvRuntime{}
		if img.RootFS != nil && img.RootFS.Type == image.TypeLayers {
			// For TP5, the utility VM is part of the base layer.
			// TODO-jstarks: Add support for separate utility VM images
			// once it is decided how they can be stored.
			uvmpath := filepath.Join(layerPaths[len(layerPaths)-1], "UtilityVM")
			_, err = os.Stat(uvmpath)
			if err != nil {
				if os.IsNotExist(err) {
					err = errors.New("container image does not contain a utility VM")
				}
				return nil, err
			}

			hvr.ImagePath = uvmpath
		}

		s.Windows.HvRuntime = hvr
	}

	// In s.Windows.Networking
	// Connect all the libnetwork allocated networks to the container
	var epList []string
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}
	s.Windows.Networking = &windowsoci.Networking{
		EndpointList: epList,
	}

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint64(c.HostConfig.CPUShares)
	s.Windows.Resources = &windowsoci.Resources{
		CPU: &windowsoci.CPU{
			//TODO Count: ...,
			//TODO Percent: ...,
			Shares: &cpuShares,
		},
		Memory: &windowsoci.Memory{
			//TODO Limit: ...,
			//TODO Reservation: ...,
		},
		Network: &windowsoci.Network{
			//TODO Bandwidth: ...,
		},
		Storage: &windowsoci.Storage{
			//TODO Bps: ...,
			//TODO Iops: ...,
			//TODO SandboxSize: ...,
		},
	}

	return (*libcontainerd.Spec)(&s), nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		m := specs.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
		}
		if !mount.Writable {
			m.Options = append(m.Options, "ro")
		}
		s.Mounts = append(s.Mounts, m)
	}

	// In s.Process
	s.Process.Args = append([]string{c.Path}, c.Args...)
	if !c.Config.ArgsEscaped {
		s.Process.Args = escapeArgs(s.Process.Args)
	}
	s.Process.Cwd = c.Config.WorkingDir
	if len(s.Process.Cwd) == 0 {
		// We default to C:\ to workaround the oddity of the case that the
		// default directory for cmd running as LocalSystem (or
		// ContainerAdministrator) is c:\windows\system32. Hence docker run
		// <image> cmd will by default end in c:\windows\system32, rather
		// than 'root' (/) on Linux. The oddity is that if you have a dockerfile
		// which has no WORKDIR and has a COPY file ., . will be interpreted
		// as c:\. Hence, setting it to default of c:\ makes for consistency.
		s.Process.Cwd = `C:\`
	}
	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
	s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0]
	s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1]
	s.Process.Terminal = c.Config.Tty
	s.Process.User.Username = c.Config.User

	// In spec.Root. This is not set for Hyper-V containers
	isHyperV := false
	if c.HostConfig.Isolation.IsDefault() {
		// Container using default isolation, so take the default from the daemon configuration
		isHyperV = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container may be requesting an explicit isolation mode.
		isHyperV = c.HostConfig.Isolation.IsHyperV()
	}
	if !isHyperV {
		s.Root.Path = c.BaseFS
	}
	s.Root.Readonly = false // Windows does not support a read-only root filesystem

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint16(c.HostConfig.CPUShares)
	cpuPercent := uint8(c.HostConfig.CPUPercent)
	if c.HostConfig.NanoCPUs > 0 {
		cpuPercent = uint8(c.HostConfig.NanoCPUs * 100 / int64(sysinfo.NumCPU()) / 1e9)
	}
	cpuCount := uint64(c.HostConfig.CPUCount)
	memoryLimit := uint64(c.HostConfig.Memory)
	s.Windows.Resources = &specs.WindowsResources{
		CPU: &specs.WindowsCPUResources{
			Percent: &cpuPercent,
			Shares:  &cpuShares,
			Count:   &cpuCount,
		},
		Memory: &specs.WindowsMemoryResources{
			Limit: &memoryLimit,
			//TODO Reservation: ...,
		},
		Network: &specs.WindowsNetworkResources{
			//TODO Bandwidth: ...,
		},
		Storage: &specs.WindowsStorageResources{
			Bps:  &c.HostConfig.IOMaximumBandwidth,
			Iops: &c.HostConfig.IOMaximumIOps,
		},
	}
	return (*specs.Spec)(&s), nil
}
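In the version above, a NanoCPUs value (billionths of a CPU) is converted into a whole-machine percentage via NanoCPUs * 100 / numCPUs / 1e9, using integer division throughout. A quick sanity check of that arithmetic, with a hard-coded CPU count standing in for sysinfo.NumCPU():

package main

import "fmt"

func main() {
	// Pretend we are on a 4-CPU host; the daemon uses sysinfo.NumCPU() here.
	numCPU := int64(4)

	// --cpus=2 is surfaced as NanoCPUs = 2 * 1e9 (billionths of a CPU).
	nanoCPUs := int64(2e9)

	// Same integer arithmetic as the spec code above: the result truncates
	// toward zero and is expected to fit in a uint8 (0-100).
	percent := uint8(nanoCPUs * 100 / numCPU / 1e9)

	fmt.Printf("%d nanocpus on %d CPUs => %d%%\n", nanoCPUs, numCPU, percent) // => 50%
}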
func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		s.Mounts = append(s.Mounts, windowsoci.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
			Readonly:    !mount.Writable,
		})
	}

	// Are we going to run as a Hyper-V container?
	hv := false
	if c.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hv = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hv = c.HostConfig.Isolation.IsHyperV()
	}
	if hv {
		// TODO We don't yet have the ImagePath hooked up. But set to
		// something non-nil to pickup in libcontainerd.
		s.Windows.HvRuntime = &windowsoci.HvRuntime{}
	}

	// In s.Process
	if c.Config.ArgsEscaped {
		s.Process.Args = append([]string{c.Path}, c.Args...)
	} else {
		// TODO (jstarks): escape the entrypoint too once the tests are fixed to not rely on this behavior
		s.Process.Args = append([]string{c.Path}, escapeArgs(c.Args)...)
	}
	s.Process.Cwd = c.Config.WorkingDir
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize
	s.Process.Terminal = c.Config.Tty
	s.Process.User.User = c.Config.User

	// In spec.Root
	s.Root.Path = c.BaseFS
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	// In s.Windows
	s.Windows.FirstStart = !c.HasBeenStartedBefore

	// s.Windows.LayerFolder.
	m, err := c.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	s.Windows.LayerFolder = m["dir"]

	// s.Windows.LayerPaths
	var layerPaths []string
	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}
	s.Windows.LayerPaths = layerPaths

	// In s.Windows.Networking (TP5+ libnetwork way of doing things)
	// Connect all the libnetwork allocated networks to the container
	var epList []string
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}
	s.Windows.Networking = &windowsoci.Networking{
		EndpointList: epList,
	}

	// In s.Windows.Networking (TP4 back compat)
	// TODO Windows: Post TP4 - Remove this along with definitions from spec
	// and changes to libcontainerd to not read these fields.
	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				s.Windows.Networking = &windowsoci.Networking{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,
				}
			}
		default:
			return nil, fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint64(c.HostConfig.CPUShares)
	s.Windows.Resources = &windowsoci.Resources{
		CPU: &windowsoci.CPU{
			//TODO Count: ...,
			//TODO Percent: ...,
			Shares: &cpuShares,
		},
		Memory: &windowsoci.Memory{
			//TODO Limit: ...,
			//TODO Reservation: ...,
		},
		Network: &windowsoci.Network{
			//TODO Bandwidth: ...,
		},
		Storage: &windowsoci.Storage{
			//TODO Bps: ...,
			//TODO Iops: ...,
			//TODO SandboxSize: ...,
		},
	}

	return (*libcontainerd.Spec)(&s), nil
}
// createContainerPlatformSpecificSettings performs platform specific container create functionality
func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
	// Make sure the host config has the default daemon isolation if not specified by caller.
	if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) {
		hostConfig.Isolation = daemon.defaultIsolation
	}

	if err := daemon.Mount(container); err != nil {
		return err
	}
	defer daemon.Unmount(container)

	if err := container.SetupWorkingDirectory(0, 0); err != nil {
		return err
	}

	for spec := range config.Volumes {
		mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver)
		if err != nil {
			return fmt.Errorf("Unrecognised volume spec: %v", err)
		}

		// If the mountpoint doesn't have a name, generate one.
		if len(mp.Name) == 0 {
			mp.Name = stringid.GenerateNonCryptoID()
		}

		// Skip volumes for which we already have something mounted on that
		// destination because of a --volume-from.
		if container.IsDestinationMounted(mp.Destination) {
			continue
		}

		volumeDriver := hostConfig.VolumeDriver

		// Create the volume in the volume driver. If it doesn't exist,
		// a new one will be created.
		v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
		if err != nil {
			return err
		}

		// FIXME Windows: This code block is present in the Linux version and
		// allows the contents to be copied to the container FS prior to it
		// being started. However, the function utilizes the FollowSymLinkInScope
		// path which does not cope with Windows volume-style file paths. There
		// is a separate effort to resolve this (@swernli), so this processing
		// is deferred for now. A case where this would be useful is when
		// a dockerfile includes a VOLUME statement, but something is created
		// in that directory during the dockerfile processing. What this means
		// on Windows for TP5 is that in that scenario, the contents will not
		// be copied, but that's (somewhat) OK as HCS will bomb out soon after
		// as it doesn't support mapped directories which have contents in the
		// destination path anyway.
		//
		// Example for repro later:
		//   FROM windowsservercore
		//   RUN mkdir c:\myvol
		//   RUN copy c:\windows\system32\ntdll.dll c:\myvol
		//   VOLUME "c:\myvol"
		//
		// Then
		//   docker build -t vol .
		//   docker run -it --rm vol cmd  <-- This is where HCS will error out.
		//
		// // never attempt to copy existing content in a container FS to a shared volume
		// if v.DriverName() == volume.DefaultDriverName {
		//	if err := container.CopyImagePathContent(v, mp.Destination); err != nil {
		//		return err
		//	}
		// }

		// Add it to container.MountPoints
		container.AddMountPointWithVolume(mp.Destination, v, mp.RW)
	}
	return nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		m := windowsoci.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
		}
		if !mount.Writable {
			m.Options = append(m.Options, "ro")
		}
		s.Mounts = append(s.Mounts, m)
	}

	// In s.Process
	s.Process.Args = append([]string{c.Path}, c.Args...)
	if !c.Config.ArgsEscaped {
		s.Process.Args = escapeArgs(s.Process.Args)
	}
	s.Process.Cwd = c.Config.WorkingDir
	if len(s.Process.Cwd) == 0 {
		// We default to C:\ to workaround the oddity of the case that the
		// default directory for cmd running as LocalSystem (or
		// ContainerAdministrator) is c:\windows\system32. Hence docker run
		// <image> cmd will by default end in c:\windows\system32, rather
		// than 'root' (/) on Linux. The oddity is that if you have a dockerfile
		// which has no WORKDIR and has a COPY file ., . will be interpreted
		// as c:\. Hence, setting it to default of c:\ makes for consistency.
		s.Process.Cwd = `C:\`
	}
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.ConsoleSize.Height = c.HostConfig.ConsoleSize[0]
	s.Process.ConsoleSize.Width = c.HostConfig.ConsoleSize[1]
	s.Process.Terminal = c.Config.Tty
	s.Process.User.Username = c.Config.User

	// In spec.Root
	s.Root.Path = c.BaseFS
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint64(c.HostConfig.CPUShares)
	s.Windows.Resources = &windowsoci.WindowsResources{
		CPU: &windowsoci.WindowsCPU{
			Percent: &c.HostConfig.CPUPercent,
			Shares:  &cpuShares,
		},
		Memory: &windowsoci.WindowsMemory{
			Limit: &c.HostConfig.Memory,
			//TODO Reservation: ...,
		},
		Network: &windowsoci.WindowsNetwork{
			//TODO Bandwidth: ...,
		},
		Storage: &windowsoci.WindowsStorage{
			Bps:  &c.HostConfig.IOMaximumBandwidth,
			Iops: &c.HostConfig.IOMaximumIOps,
		},
	}

	return (*libcontainerd.Spec)(&s), nil
}
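One small Go detail that recurs across these createSpec variants: the resource fields take pointers (so "unset" is distinguishable from zero), and values such as CPUShares are first copied into a local variable of the target type, both to perform the conversion and because the result of a conversion expression is not addressable. A tiny illustration (the resources struct here is a stand-in, not the real windowsoci type):

package main

import "fmt"

// resources stands in for the windowsoci/specs resource structs,
// whose fields are pointers.
type resources struct {
	Shares *uint64
}

func main() {
	configured := int64(512) // e.g. c.HostConfig.CPUShares

	// &uint64(configured) would not compile: a conversion expression is
	// not addressable. Copy into a local of the target type, then take
	// its address, exactly as the daemon code does with cpuShares.
	shares := uint64(configured)
	r := resources{Shares: &shares}

	fmt.Println(*r.Shares) // 512
}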