func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) {
	createOptions := []libcontainerd.CreateOption{}

	// Are we going to run as a Hyper-V container?
	hvOpts := &libcontainerd.HyperVIsolationOption{}
	if container.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV()
	}

	// Generate the layer folder of the layer options
	layerOpts := &libcontainerd.LayerOption{}
	m, err := container.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("failed to get layer metadata - %s", err)
	}
	if hvOpts.IsHyperV {
		hvOpts.SandboxPath = filepath.Dir(m["dir"])
	} else {
		layerOpts.LayerFolderPath = m["dir"]
	}

	// Generate the layer paths of the layer options
	img, err := daemon.imageStore.Get(container.ImageID)
	if err != nil {
		return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err)
	}
	// Get the layer path for each layer.
	max := len(img.RootFS.DiffIDs)
	for i := 1; i <= max; i++ {
		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
		layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
		if err != nil {
			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
		}
		// Reverse order, expecting parent most first
		layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...)
	}

	// Now build the full set of options
	createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore})
	createOptions = append(createOptions, hvOpts)
	createOptions = append(createOptions, layerOpts)

	return &createOptions, nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		s.Mounts = append(s.Mounts, windowsoci.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
			Readonly:    !mount.Writable,
		})
	}

	// In s.Process
	s.Process.Args = append([]string{c.Path}, c.Args...)
	if !c.Config.ArgsEscaped {
		s.Process.Args = escapeArgs(s.Process.Args)
	}
	s.Process.Cwd = c.Config.WorkingDir
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize
	s.Process.Terminal = c.Config.Tty
	s.Process.User.User = c.Config.User

	// In spec.Root
	s.Root.Path = c.BaseFS
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	// In s.Windows
	s.Windows.FirstStart = !c.HasBeenStartedBefore

	// s.Windows.LayerFolder.
	m, err := c.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	s.Windows.LayerFolder = m["dir"]

	// s.Windows.LayerPaths
	var layerPaths []string
	if img.RootFS != nil && (img.RootFS.Type == image.TypeLayers || img.RootFS.Type == image.TypeLayersWithBase) {
		// Get the layer path for each layer.
		start := 1
		if img.RootFS.Type == image.TypeLayersWithBase {
			// Include an empty slice to get the base layer ID.
			start = 0
		}
		max := len(img.RootFS.DiffIDs)
		for i := start; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}
	s.Windows.LayerPaths = layerPaths

	// Are we going to run as a Hyper-V container?
	hv := false
	if c.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hv = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hv = c.HostConfig.Isolation.IsHyperV()
	}
	if hv {
		hvr := &windowsoci.HvRuntime{}
		if img.RootFS != nil && img.RootFS.Type == image.TypeLayers {
			// For TP5, the utility VM is part of the base layer.
			// TODO-jstarks: Add support for separate utility VM images
			// once it is decided how they can be stored.
			uvmpath := filepath.Join(layerPaths[len(layerPaths)-1], "UtilityVM")
			_, err = os.Stat(uvmpath)
			if err != nil {
				if os.IsNotExist(err) {
					err = errors.New("container image does not contain a utility VM")
				}
				return nil, err
			}

			hvr.ImagePath = uvmpath
		}

		s.Windows.HvRuntime = hvr
	}

	// In s.Windows.Networking
	// Connect all the libnetwork allocated networks to the container
	var epList []string
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}
	s.Windows.Networking = &windowsoci.Networking{
		EndpointList: epList,
	}

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint64(c.HostConfig.CPUShares)
	s.Windows.Resources = &windowsoci.Resources{
		CPU: &windowsoci.CPU{
			//TODO Count: ...,
			//TODO Percent: ...,
			Shares: &cpuShares,
		},
		Memory: &windowsoci.Memory{
			//TODO Limit: ...,
			//TODO Reservation: ...,
		},
		Network: &windowsoci.Network{
			//TODO Bandwidth: ...,
		},
		Storage: &windowsoci.Storage{
			//TODO Bps: ...,
			//TODO Iops: ...,
			//TODO SandboxSize: ...,
		},
	}

	return (*libcontainerd.Spec)(&s), nil
}
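// createSpec above calls escapeArgs, which is not defined in this section. The
// following is a minimal sketch of what such a helper might look like, assuming
// the quoting rule provided by syscall.EscapeArg in Go's Windows syscall
// package is acceptable; the body is an assumption, only the name and call site
// come from the code above.
func escapeArgs(args []string) []string {
	escapedArgs := make([]string, len(args))
	for i, a := range args {
		// Quote each argument so it survives Windows command-line parsing.
		escapedArgs[i] = syscall.EscapeArg(a)
	}
	return escapedArgs
}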
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.bridgeConfig.VirtualSwitchName,
				PortBindings: c.HostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}
	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return derr.ErrorCodeGetLayer.WithArgs(err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
	}

	return nil
}
func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) {
	createOptions := []libcontainerd.CreateOption{}

	// Are we going to run as a Hyper-V container?
	hvOpts := &libcontainerd.HyperVIsolationOption{}
	if container.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV()
	}

	// Generate the layer folder of the layer options
	layerOpts := &libcontainerd.LayerOption{}
	m, err := container.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("failed to get layer metadata - %s", err)
	}
	if hvOpts.IsHyperV {
		hvOpts.SandboxPath = filepath.Dir(m["dir"])
	}

	layerOpts.LayerFolderPath = m["dir"]

	// Generate the layer paths of the layer options
	img, err := daemon.imageStore.Get(container.ImageID)
	if err != nil {
		return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err)
	}
	// Get the layer path for each layer.
	max := len(img.RootFS.DiffIDs)
	for i := 1; i <= max; i++ {
		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
		layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
		if err != nil {
			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
		}
		// Reverse order, expecting parent most first
		layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...)
	}

	// Get endpoints for the libnetwork allocated networks to the container
	var epList []string
	AllowUnqualifiedDNSQuery := false
	if container.NetworkSettings != nil {
		for n := range container.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := container.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}

			if data["AllowUnqualifiedDNSQuery"] != nil {
				AllowUnqualifiedDNSQuery = true
			}
		}
	}

	// Now build the full set of options
	createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore})
	createOptions = append(createOptions, hvOpts)
	createOptions = append(createOptions, layerOpts)
	if epList != nil {
		createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery})
	}

	return &createOptions, nil
}
func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
	createOptions := []libcontainerd.CreateOption{}

	// Are we going to run as a Hyper-V container?
	hvOpts := &libcontainerd.HyperVIsolationOption{}
	if container.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hvOpts.IsHyperV = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hvOpts.IsHyperV = container.HostConfig.Isolation.IsHyperV()
	}

	// Generate the layer folder of the layer options
	layerOpts := &libcontainerd.LayerOption{}
	m, err := container.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("failed to get layer metadata - %s", err)
	}
	if hvOpts.IsHyperV {
		hvOpts.SandboxPath = filepath.Dir(m["dir"])
	}

	layerOpts.LayerFolderPath = m["dir"]

	// Generate the layer paths of the layer options
	img, err := daemon.imageStore.Get(container.ImageID)
	if err != nil {
		return nil, fmt.Errorf("failed to graph.Get on ImageID %s - %s", container.ImageID, err)
	}
	// Get the layer path for each layer.
	max := len(img.RootFS.DiffIDs)
	for i := 1; i <= max; i++ {
		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
		layerPath, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
		if err != nil {
			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
		}
		// Reverse order, expecting parent most first
		layerOpts.LayerPaths = append([]string{layerPath}, layerOpts.LayerPaths...)
	}

	// Get endpoints for the libnetwork allocated networks to the container
	var epList []string
	AllowUnqualifiedDNSQuery := false
	gwHNSID := ""
	if container.NetworkSettings != nil {
		for n := range container.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := container.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}

			if data["GW_INFO"] != nil {
				gwInfo := data["GW_INFO"].(map[string]interface{})
				if gwInfo["hnsid"] != nil {
					gwHNSID = gwInfo["hnsid"].(string)
				}
			}

			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}

			if data["AllowUnqualifiedDNSQuery"] != nil {
				AllowUnqualifiedDNSQuery = true
			}
		}
	}

	if gwHNSID != "" {
		epList = append(epList, gwHNSID)
	}

	// Read and add credentials from the security options if a credential spec has been provided.
	if container.HostConfig.SecurityOpt != nil {
		for _, sOpt := range container.HostConfig.SecurityOpt {
			sOpt = strings.ToLower(sOpt)
			if !strings.Contains(sOpt, "=") {
				return nil, fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt)
			}
			var splitsOpt []string
			splitsOpt = strings.SplitN(sOpt, "=", 2)
			if len(splitsOpt) != 2 {
				return nil, fmt.Errorf("invalid security option: %s", sOpt)
			}
			if splitsOpt[0] != "credentialspec" {
				return nil, fmt.Errorf("security option not supported: %s", splitsOpt[0])
			}

			credentialsOpts := &libcontainerd.CredentialsOption{}
			var (
				match   bool
				csValue string
				err     error
			)
			if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match {
				if csValue == "" {
					return nil, fmt.Errorf("no value supplied for file:// credential spec security option")
				}
				if credentialsOpts.Credentials, err = readCredentialSpecFile(container.ID, daemon.root, filepath.Clean(csValue)); err != nil {
					return nil, err
				}
			} else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match {
				if csValue == "" {
					return nil, fmt.Errorf("no value supplied for registry:// credential spec security option")
				}
				if credentialsOpts.Credentials, err = readCredentialSpecRegistry(container.ID, csValue); err != nil {
					return nil, err
				}
			} else {
				return nil, fmt.Errorf("invalid credential spec security option - value must be prefixed file:// or registry:// followed by a value")
			}
			createOptions = append(createOptions, credentialsOpts)
		}
	}

	// Now add the remaining options.
	createOptions = append(createOptions, &libcontainerd.FlushOption{IgnoreFlushesDuringBoot: !container.HasBeenStartedBefore})
	createOptions = append(createOptions, hvOpts)
	createOptions = append(createOptions, layerOpts)
	if epList != nil {
		createOptions = append(createOptions, &libcontainerd.NetworkEndpointsOption{Endpoints: epList, AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery})
	}

	return createOptions, nil
}
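// getCredentialSpec is referenced by the credential-spec parsing above but is
// not defined in this section. The sketch below is an assumption of what a
// minimal version might look like: it only checks for the given prefix and
// returns the remainder of the value. The companion helpers
// readCredentialSpecFile and readCredentialSpecRegistry are intentionally not
// reproduced here.
func getCredentialSpec(prefix, value string) (bool, string) {
	if strings.HasPrefix(value, prefix) {
		return true, strings.TrimPrefix(value, prefix)
	}
	return false, ""
}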
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	var epList []string

	// Connect all the libnetwork allocated networks to the container
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}

	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				en.Interface = &execdriver.NetworkInterface{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,

					// TODO Windows. Include IPAddress. There already is a
					// property IPAddress on execDrive.CommonNetworkInterface,
					// but there is no CLI option in docker to pass through
					// an IPAddress on docker run.
				}
			}
		default:
			return fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}
	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
		EpList:      epList,
	}

	return nil
}
func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) {
	s := oci.DefaultSpec()

	linkedEnv, err := daemon.setupLinkedContainers(c)
	if err != nil {
		return nil, err
	}

	// TODO Windows - this can be removed. Not used (UID/GID)
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil {
		return nil, err
	}

	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	// In base spec
	s.Hostname = c.FullHostname()

	// In s.Mounts
	mounts, err := daemon.setupMounts(c)
	if err != nil {
		return nil, err
	}
	for _, mount := range mounts {
		s.Mounts = append(s.Mounts, windowsoci.Mount{
			Source:      mount.Source,
			Destination: mount.Destination,
			Readonly:    !mount.Writable,
		})
	}

	// Are we going to run as a Hyper-V container?
	hv := false
	if c.HostConfig.Isolation.IsDefault() {
		// Container is set to use the default, so take the default from the daemon configuration
		hv = daemon.defaultIsolation.IsHyperV()
	} else {
		// Container is requesting an isolation mode. Honour it.
		hv = c.HostConfig.Isolation.IsHyperV()
	}
	if hv {
		// TODO We don't yet have the ImagePath hooked up. But set to
		// something non-nil to pickup in libcontainerd.
		s.Windows.HvRuntime = &windowsoci.HvRuntime{}
	}

	// In s.Process
	if c.Config.ArgsEscaped {
		s.Process.Args = append([]string{c.Path}, c.Args...)
	} else {
		// TODO (jstarks): escape the entrypoint too once the tests are fixed to not rely on this behavior
		s.Process.Args = append([]string{c.Path}, escapeArgs(c.Args)...)
	}
	s.Process.Cwd = c.Config.WorkingDir
	s.Process.Env = c.CreateDaemonEnvironment(linkedEnv)
	s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize
	s.Process.Terminal = c.Config.Tty
	s.Process.User.User = c.Config.User

	// In spec.Root
	s.Root.Path = c.BaseFS
	s.Root.Readonly = c.HostConfig.ReadonlyRootfs

	// In s.Windows
	s.Windows.FirstStart = !c.HasBeenStartedBefore

	// s.Windows.LayerFolder.
	m, err := c.RWLayer.Metadata()
	if err != nil {
		return nil, fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	s.Windows.LayerFolder = m["dir"]

	// s.Windows.LayerPaths
	var layerPaths []string
	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}
	s.Windows.LayerPaths = layerPaths

	// In s.Windows.Networking (TP5+ libnetwork way of doing things)
	// Connect all the libnetwork allocated networks to the container
	var epList []string
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}
	s.Windows.Networking = &windowsoci.Networking{
		EndpointList: epList,
	}

	// In s.Windows.Networking (TP4 back compat)
	// TODO Windows: Post TP4 - Remove this along with definitions from spec
	// and changes to libcontainerd to not read these fields.
	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				s.Windows.Networking = &windowsoci.Networking{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,
				}
			}
		default:
			return nil, fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// In s.Windows.Resources
	// @darrenstahlmsft implement these resources
	cpuShares := uint64(c.HostConfig.CPUShares)
	s.Windows.Resources = &windowsoci.Resources{
		CPU: &windowsoci.CPU{
			//TODO Count: ...,
			//TODO Percent: ...,
			Shares: &cpuShares,
		},
		Memory: &windowsoci.Memory{
			//TODO Limit: ...,
			//TODO Reservation: ...,
		},
		Network: &windowsoci.Network{
			//TODO Bandwidth: ...,
		},
		Storage: &windowsoci.Storage{
			//TODO Bps: ...,
			//TODO Iops: ...,
			//TODO SandboxSize: ...,
		},
	}

	return (*libcontainerd.Spec)(&s), nil
}