func TestCustomLxcConfigMisc(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, "", false)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}
	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		ID: "1",
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
		},
		Network: &execdriver.Network{
			Mtu: 1500,
			Interface: &execdriver.NetworkInterface{
				Gateway:     "10.10.10.1",
				IPAddress:   "10.10.10.10",
				IPPrefixLen: 24,
				Bridge:      "docker0",
			},
		},
		ProcessConfig: processConfig,
		CapAdd:        []string{"net_admin", "syslog"},
		CapDrop:       []string{"kill", "mknod"},
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}

	// network
	grepFile(t, p, "lxc.network.type = veth")
	grepFile(t, p, "lxc.network.link = docker0")
	grepFile(t, p, "lxc.network.name = eth0")
	grepFile(t, p, "lxc.network.ipv4 = 10.10.10.10/24")
	grepFile(t, p, "lxc.network.ipv4.gateway = 10.10.10.1")
	grepFile(t, p, "lxc.network.flags = up")

	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")

	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		cap = strings.ToLower(cap)
		if cap != "mknod" && cap != "kill" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", cap))
		}
	}
	grepFileWithReverse(t, p, "lxc.cap.keep = kill", true)
	grepFileWithReverse(t, p, "lxc.cap.keep = mknod", true)
}
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				MacAddress: network.MacAddress,
			}
		}
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. Resource controls to be implemented later.
	resources := &execdriver.Resources{}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}
	processConfig.Env = env

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.RootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.GetProcessLabel(),
		MountLabel:     c.GetMountLabel(),
	}

	return nil
}
func TestCustomLxcConfigMisc(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, root, "", true)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}
	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID: "1",
			Network: &execdriver.Network{
				Mtu: 1500,
			},
			ProcessConfig: processConfig,
		},
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
		},
		CapAdd:          []string{"net_admin", "syslog"},
		CapDrop:         []string{"kill", "mknod"},
		AppArmorProfile: "lxc-container-default-with-nesting",
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}

	grepFile(t, p, "lxc.aa_profile = lxc-container-default-with-nesting")

	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")

	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		realCap := execdriver.GetCapability(cap)
		numCap := fmt.Sprintf("%d", realCap.Value)
		if cap != "MKNOD" && cap != "KILL" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
		}
	}

	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
}
func TestCustomLxcConfigMiscOverride(t *testing.T) {
	root, err := ioutil.TempDir("", "TestCustomLxcConfig")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(root)
	os.MkdirAll(path.Join(root, "containers", "1"), 0777)
	driver, err := NewDriver(root, root, "", false)
	if err != nil {
		t.Fatal(err)
	}
	processConfig := execdriver.ProcessConfig{
		Privileged: false,
	}
	processConfig.Env = []string{"HOSTNAME=testhost"}
	command := &execdriver.Command{
		ID: "1",
		LxcConfig: []string{
			"lxc.cgroup.cpuset.cpus = 0,1",
			"lxc.network.ipv4 = 172.0.0.1",
		},
		Network: &execdriver.Network{
			Mtu:       1500,
			Interface: nil,
		},
		ProcessConfig: processConfig,
		CapAdd:        []string{"NET_ADMIN", "SYSLOG"},
		CapDrop:       []string{"KILL", "MKNOD"},
	}

	p, err := driver.generateLXCConfig(command)
	if err != nil {
		t.Fatal(err)
	}

	// hostname
	grepFile(t, p, "lxc.utsname = testhost")
	grepFile(t, p, "lxc.cgroup.cpuset.cpus = 0,1")

	container := nativeTemplate.New()
	for _, cap := range container.Capabilities {
		realCap := execdriver.GetCapability(cap)
		numCap := fmt.Sprintf("%d", realCap.Value)
		if cap != "MKNOD" && cap != "KILL" {
			grepFile(t, p, fmt.Sprintf("lxc.cap.keep = %s", numCap))
		}
	}

	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_KILL), true)
	grepFileWithReverse(t, p, fmt.Sprintf("lxc.cap.keep = %d", capability.CAP_MKNOD), true)
}
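// The tests above assert against the generated LXC config through grepFile and
// grepFileWithReverse helpers that are not shown in this section. Below is a
// minimal sketch of what they are assumed to do: scan the file at path line by
// line for a literal substring, failing the test when the pattern is missing
// (or, with inverseGrep set, when it is present). The implementation in the
// lxc driver's test file may differ in detail; this version needs the bufio,
// os, strings, and testing imports.
func grepFile(t *testing.T, path string, pattern string) {
	grepFileWithReverse(t, path, pattern, false)
}

func grepFileWithReverse(t *testing.T, path string, pattern string, inverseGrep bool) {
	f, err := os.Open(path)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		if strings.Contains(scanner.Text(), pattern) {
			if inverseGrep {
				t.Fatalf("grepFile: pattern %q found in %q", pattern, path)
			}
			return
		}
	}
	if err := scanner.Err(); err != nil {
		t.Fatal(err)
	}
	if !inverseGrep {
		t.Fatalf("grepFile: pattern %q not found in %q", pattern, path)
	}
}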
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		if c.hostConfig.NetworkMode.IsContainer() {
			nc, err := daemon.getNetworkedContainer(c.ID, c.hostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}
	var err error
	c.ShmPath, err = c.shmPath()
	if err != nil {
		return err
	}

	c.MqueuePath, err = c.mqueuePath()
	if err != nil {
		return err
	}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
		c.ShmPath = ic.ShmPath
		c.MqueuePath = ic.MqueuePath
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
		if ipc.HostIpc {
			if _, err := os.Stat("/dev/shm"); err != nil {
				return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
			}
			if _, err := os.Stat("/dev/mqueue"); err != nil {
				return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
			}
			c.ShmPath = "/dev/shm"
			c.MqueuePath = "/dev/mqueue"
		}
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.hostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.hostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}
		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)
	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	weightDevices, err := getBlkioWeightDevices(c.hostConfig)
	if err != nil {
		return err
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			Memory:            c.hostConfig.Memory,
			MemoryReservation: c.hostConfig.MemoryReservation,
			CPUShares:         c.hostConfig.CPUShares,
			BlkioWeight:       c.hostConfig.BlkioWeight,
		},
		MemorySwap:        c.hostConfig.MemorySwap,
		KernelMemory:      c.hostConfig.KernelMemory,
		CpusetCpus:        c.hostConfig.CpusetCpus,
		CpusetMems:        c.hostConfig.CpusetMems,
		CPUPeriod:         c.hostConfig.CPUPeriod,
		CPUQuota:          c.hostConfig.CPUQuota,
		Rlimits:           rlimits,
		BlkioWeightDevice: weightDevices,
		OomKillDisable:    c.hostConfig.OomKillDisable,
		MemorySwappiness:  -1,
	}

	if c.hostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		Privileged: c.hostConfig.Privileged,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	remappedRoot := &execdriver.User{}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if rootUID != 0 {
		remappedRoot.UID = rootUID
		remappedRoot.GID = rootGID
	}
	uidMap, gidMap := daemon.GetUIDGIDMaps()

	c.command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			InitPath:      "/.dockerinit",
			MountLabel:    c.getMountLabel(),
			Network:       en,
			ProcessConfig: processConfig,
			ProcessLabel:  c.getProcessLabel(),
			Rootfs:        c.rootfsPath(),
			Resources:     resources,
			WorkingDir:    c.Config.WorkingDir,
		},
		AllowedDevices:     allowedDevices,
		AppArmorProfile:    c.AppArmorProfile,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd.Slice(),
		CapDrop:            c.hostConfig.CapDrop.Slice(),
		CgroupParent:       c.hostConfig.CgroupParent,
		GIDMapping:         gidMap,
		GroupAdd:           c.hostConfig.GroupAdd,
		Ipc:                ipc,
		Pid:                pid,
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		RemappedRoot:       remappedRoot,
		UIDMapping:         uidMap,
		UTS:                uts,
	}

	return nil
}
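// The variant above (and later Linux variants in this section) builds its
// device lists with a mergeDevices helper that is not shown here. A minimal
// sketch, assuming the helper simply lets a user-specified device take
// precedence over any default device that shares the same in-container path:
func mergeDevices(defaultDevices, userDevices []*configs.Device) []*configs.Device {
	if len(userDevices) == 0 {
		return defaultDevices
	}

	// Index the user devices by path so defaults with the same path are dropped.
	paths := map[string]*configs.Device{}
	for _, d := range userDevices {
		paths[d.Path] = d
	}

	var devs []*configs.Device
	for _, d := range defaultDevices {
		if _, defined := paths[d.Path]; !defined {
			devs = append(devs, d)
		}
	}
	return append(devs, userDevices...)
}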
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "host":
		en.HostNetworking = true
	case "bridge", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				Gateway:              network.Gateway,
				Bridge:               network.Bridge,
				IPAddress:            network.IPAddress,
				IPPrefixLen:          network.IPPrefixLen,
				MacAddress:           network.MacAddress,
				LinkLocalIPv6Address: network.LinkLocalIPv6Address,
				GlobalIPv6Address:    network.GlobalIPv6Address,
				GlobalIPv6PrefixLen:  network.GlobalIPv6PrefixLen,
				IPv6Gateway:          network.IPv6Gateway,
			}
		}
	case "container":
		nc, err := c.getNetworkedContainer()
		if err != nil {
			return err
		}
		en.ContainerID = nc.ID
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	ipc := &execdriver.Ipc{}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := c.getIpcContainer()
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// Build lists of devices allowed and created within the container.
	userSpecifiedDevices := make([]*configs.Device, len(c.hostConfig.Devices))
	for i, deviceMapping := range c.hostConfig.Devices {
		device, err := devices.DeviceFromPath(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
		if err != nil {
			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
		}
		device.Path = deviceMapping.PathInContainer
		userSpecifiedDevices[i] = device
	}
	allowedDevices := append(configs.DefaultAllowedDevices, userSpecifiedDevices...)

	autoCreatedDevices := append(configs.DefaultAutoCreatedDevices, userSpecifiedDevices...)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
	if err != nil {
		return err
	}

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range c.daemon.config.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		Memory:     c.hostConfig.Memory,
		MemorySwap: c.hostConfig.MemorySwap,
		CpuShares:  c.hostConfig.CpuShares,
		CpusetCpus: c.hostConfig.CpusetCpus,
		Rlimits:    rlimits,
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.RootfsPath(),
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Ipc:                ipc,
		Pid:                pid,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd,
		CapDrop:            c.hostConfig.CapDrop,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.GetProcessLabel(),
		MountLabel:         c.GetMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
		CgroupParent:       c.hostConfig.CgroupParent,
	}

	return nil
}
func populateCommand(c *Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !c.daemon.execDriver.SupportsHooks() || c.hostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
		if parts[0] == "container" {
			nc, err := c.getNetworkedContainer()
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}

	if c.hostConfig.IpcMode.IsContainer() {
		ic, err := c.getIpcContainer()
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
	} else {
		ipc.HostIpc = c.hostConfig.IpcMode.IsHost()
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.hostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.hostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}
		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}
	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)

	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig, err := mergeLxcConfIntoOptions(c.hostConfig)
	if err != nil {
		return err
	}

	var rlimits []*ulimit.Rlimit
	ulimits := c.hostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*ulimit.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range c.daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		Memory:           c.hostConfig.Memory,
		MemorySwap:       c.hostConfig.MemorySwap,
		KernelMemory:     c.hostConfig.KernelMemory,
		CPUShares:        c.hostConfig.CPUShares,
		CpusetCpus:       c.hostConfig.CpusetCpus,
		CpusetMems:       c.hostConfig.CpusetMems,
		CPUPeriod:        c.hostConfig.CPUPeriod,
		CPUQuota:         c.hostConfig.CPUQuota,
		BlkioWeight:      c.hostConfig.BlkioWeight,
		BlkioReadLimit:   constructBlkioArgs(c.hostConfig.Binds, c.hostConfig.BlkioReadLimit),
		Rlimits:          rlimits,
		OomKillDisable:   c.hostConfig.OomKillDisable,
		MemorySwappiness: -1,
	}

	if c.hostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.hostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.rootfsPath(),
		ReadonlyRootfs:     c.hostConfig.ReadonlyRootfs,
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Ipc:                ipc,
		Pid:                pid,
		UTS:                uts,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd.Slice(),
		CapDrop:            c.hostConfig.CapDrop.Slice(),
		GroupAdd:           c.hostConfig.GroupAdd,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.getProcessLabel(),
		MountLabel:         c.getMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
		CgroupParent:       c.hostConfig.CgroupParent,
	}

	return nil
}
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.bridgeConfig.VirtualSwitchName,
				PortBindings: c.HostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return derr.ErrorCodeGetLayer.WithArgs(err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
	}

	return nil
}
func populateCommand(ctx context.Context, c *Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       c.daemon.configStore.Bridge.VirtualSwitchName,
				PortBindings: c.hostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CPUShares: c.hostConfig.CPUShares,
	}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged:  c.hostConfig.Privileged,
		Entrypoint:  c.Path,
		Arguments:   c.Args,
		Tty:         c.Config.Tty,
		User:        c.Config.User,
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := c.daemon.graph.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}
	for i := img; i != nil && err == nil; i, err = c.daemon.graph.GetParent(i) {
		lp, err := c.daemon.driver.Get(i.ID, "")
		if err != nil {
			return derr.ErrorCodeGetLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
		}
		layerPaths = append(layerPaths, lp)
		err = c.daemon.driver.Put(i.ID)
		if err != nil {
			return derr.ErrorCodePutLayer.WithArgs(c.daemon.driver.String(), i.ID, err)
		}
	}
	m, err := c.daemon.driver.GetMetadata(c.ID)
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.rootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.getProcessLabel(),
		MountLabel:     c.getMountLabel(),
		FirstStart:     !c.HasBeenStartedBefore,
		LayerFolder:    layerFolder,
		LayerPaths:     layerPaths,
	}

	return nil
}
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		if c.HostConfig.NetworkMode.IsContainer() {
			nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}
	var err error
	c.ShmPath, err = c.ShmResourcePath()
	if err != nil {
		return err
	}

	c.MqueuePath, err = c.MqueueResourcePath()
	if err != nil {
		return err
	}

	if c.HostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
		c.ShmPath = ic.ShmPath
		c.MqueuePath = ic.MqueuePath
	} else {
		ipc.HostIpc = c.HostConfig.IpcMode.IsHost()
		if ipc.HostIpc {
			if _, err := os.Stat("/dev/shm"); err != nil {
				return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
			}
			if _, err := os.Stat("/dev/mqueue"); err != nil {
				return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
			}
			c.ShmPath = "/dev/shm"
			c.MqueuePath = "/dev/mqueue"
		}
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.HostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.HostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.HostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}
		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)
	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	var rlimits []*units.Rlimit
	ulimits := c.HostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*units.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	weightDevices, err := getBlkioWeightDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readBpsDevice, err := getBlkioReadBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeBpsDevice, err := getBlkioWriteBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readIOpsDevice, err := getBlkioReadIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeIOpsDevice, err := getBlkioWriteIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			Memory:            c.HostConfig.Memory,
			MemoryReservation: c.HostConfig.MemoryReservation,
			CPUShares:         c.HostConfig.CPUShares,
			BlkioWeight:       c.HostConfig.BlkioWeight,
		},
		MemorySwap:                   c.HostConfig.MemorySwap,
		KernelMemory:                 c.HostConfig.KernelMemory,
		CpusetCpus:                   c.HostConfig.CpusetCpus,
		CpusetMems:                   c.HostConfig.CpusetMems,
		CPUPeriod:                    c.HostConfig.CPUPeriod,
		CPUQuota:                     c.HostConfig.CPUQuota,
		Rlimits:                      rlimits,
		BlkioWeightDevice:            weightDevices,
		BlkioThrottleReadBpsDevice:   readBpsDevice,
		BlkioThrottleWriteBpsDevice:  writeBpsDevice,
		BlkioThrottleReadIOpsDevice:  readIOpsDevice,
		BlkioThrottleWriteIOpsDevice: writeIOpsDevice,
		OomKillDisable:               *c.HostConfig.OomKillDisable,
		MemorySwappiness:             -1,
	}

	if c.HostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.HostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		Privileged: c.HostConfig.Privileged,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	remappedRoot := &execdriver.User{}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if rootUID != 0 {
		remappedRoot.UID = rootUID
		remappedRoot.GID = rootGID
	}
	uidMap, gidMap := daemon.GetUIDGIDMaps()

	if !daemon.seccompEnabled {
		if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" {
			return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.")
		}
		logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.")
		c.SeccompProfile = "unconfined"
	}

	defaultCgroupParent := "/docker"
	if daemon.configStore.CgroupParent != "" {
		defaultCgroupParent = daemon.configStore.CgroupParent
	} else {
		for _, option := range daemon.configStore.ExecOptions {
			key, val, err := parsers.ParseKeyValueOpt(option)
			if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
				continue
			}
			if val == "systemd" {
				defaultCgroupParent = "system.slice"
			}
		}
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			InitPath:      "/.dockerinit",
			MountLabel:    c.GetMountLabel(),
			Network:       en,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
			Rootfs:        c.BaseFS,
			Resources:     resources,
			WorkingDir:    c.Config.WorkingDir,
		},
		AllowedDevices:     allowedDevices,
		AppArmorProfile:    c.AppArmorProfile,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.HostConfig.CapAdd.Slice(),
		CapDrop:            c.HostConfig.CapDrop.Slice(),
		CgroupParent:       defaultCgroupParent,
		GIDMapping:         gidMap,
		GroupAdd:           c.HostConfig.GroupAdd,
		Ipc:                ipc,
		OomScoreAdj:        c.HostConfig.OomScoreAdj,
		Pid:                pid,
		ReadonlyRootfs:     c.HostConfig.ReadonlyRootfs,
		RemappedRoot:       remappedRoot,
		SeccompProfile:     c.SeccompProfile,
		UIDMapping:         uidMap,
		UTS:                uts,
	}

	if c.HostConfig.CgroupParent != "" {
		c.Command.CgroupParent = c.HostConfig.CgroupParent
	}

	return nil
}
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				MacAddress: network.MacAddress,
				Bridge:     c.daemon.config.Bridge.VirtualSwitchName,
			}
		}
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	pid := &execdriver.Pid{}

	// TODO Windows. This can probably be factored out.
	pid.HostPid = c.hostConfig.PidMode.IsHost()

	// TODO Windows. Resource controls to be implemented later.
	resources := &execdriver.Resources{}

	// TODO Windows. Further refactoring required (privileged/user)
	processConfig := execdriver.ProcessConfig{
		Privileged:  c.hostConfig.Privileged,
		Entrypoint:  c.Path,
		Arguments:   c.Args,
		Tty:         c.Config.Tty,
		User:        c.Config.User,
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerFolder string
	var layerPaths []string

	// The following is specific to the Windows driver. We do this to
	// enable VFS to continue operating for development purposes.
	if wd, ok := c.daemon.driver.(*windows.WindowsGraphDriver); ok {
		var err error
		var img *image.Image
		var ids []string

		if img, err = c.daemon.graph.Get(c.ImageID); err != nil {
			return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
		}
		if ids, err = c.daemon.graph.ParentLayerIds(img); err != nil {
			return fmt.Errorf("Failed to get parentlayer ids %s", img.ID)
		}
		layerPaths = wd.LayerIdsToPaths(ids)
		layerFolder = filepath.Join(wd.Info().HomeDir, filepath.Base(c.ID))
	}

	// TODO Windows: Factor out remainder of unused fields.
	c.command = &execdriver.Command{
		ID:             c.ID,
		Rootfs:         c.RootfsPath(),
		ReadonlyRootfs: c.hostConfig.ReadonlyRootfs,
		InitPath:       "/.dockerinit",
		WorkingDir:     c.Config.WorkingDir,
		Network:        en,
		Pid:            pid,
		Resources:      resources,
		CapAdd:         c.hostConfig.CapAdd.Slice(),
		CapDrop:        c.hostConfig.CapDrop.Slice(),
		ProcessConfig:  processConfig,
		ProcessLabel:   c.GetProcessLabel(),
		MountLabel:     c.GetMountLabel(),
		FirstStart:     !c.HasBeenStartedBefore,
		LayerFolder:    layerFolder,
		LayerPaths:     layerPaths,
	}

	return nil
}
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	var epList []string

	// Connect all the libnetwork allocated networks to the container
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}

	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				en.Interface = &execdriver.NetworkInterface{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,

					// TODO Windows. Include IPAddress. There already is a
					// property IPAddress on execDrive.CommonNetworkInterface,
					// but there is no CLI option in docker to pass through
					// an IPAddress on docker run.
				}
			}
		default:
			return fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
		EpList:      epList,
	}

	return nil
}
func populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Mtu:       c.daemon.config.Mtu,
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "host":
		en.HostNetworking = true
	case "bridge", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			network := c.NetworkSettings
			en.Interface = &execdriver.NetworkInterface{
				Gateway:     network.Gateway,
				Bridge:      network.Bridge,
				IPAddress:   network.IPAddress,
				IPPrefixLen: network.IPPrefixLen,
			}
		}
	case "container":
		nc, err := c.getNetworkedContainer()
		if err != nil {
			return err
		}
		en.ContainerID = nc.ID
	default:
		return fmt.Errorf("invalid network mode: %s", c.hostConfig.NetworkMode)
	}

	// Build lists of devices allowed and created within the container.
	userSpecifiedDevices := make([]*devices.Device, len(c.hostConfig.Devices))
	for i, deviceMapping := range c.hostConfig.Devices {
		device, err := devices.GetDevice(deviceMapping.PathOnHost, deviceMapping.CgroupPermissions)
		if err != nil {
			return fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err)
		}
		device.Path = deviceMapping.PathInContainer
		userSpecifiedDevices[i] = device
	}
	allowedDevices := append(devices.DefaultAllowedDevices, userSpecifiedDevices...)

	autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)

	// TODO: this can be removed after lxc-conf is fully deprecated
	lxcConfig := mergeLxcConfIntoOptions(c.hostConfig)

	resources := &execdriver.Resources{
		Memory:     c.Config.Memory,
		MemorySwap: c.Config.MemorySwap,
		CpuShares:  c.Config.CpuShares,
		Cpuset:     c.Config.Cpuset,
	}

	processConfig := execdriver.ProcessConfig{
		Privileged: c.hostConfig.Privileged,
		Entrypoint: c.Path,
		Arguments:  c.Args,
		Tty:        c.Config.Tty,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	c.command = &execdriver.Command{
		ID:                 c.ID,
		Rootfs:             c.RootfsPath(),
		InitPath:           "/.dockerinit",
		WorkingDir:         c.Config.WorkingDir,
		Network:            en,
		Resources:          resources,
		AllowedDevices:     allowedDevices,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.hostConfig.CapAdd,
		CapDrop:            c.hostConfig.CapDrop,
		ProcessConfig:      processConfig,
		ProcessLabel:       c.GetProcessLabel(),
		MountLabel:         c.GetMountLabel(),
		LxcConfig:          lxcConfig,
		AppArmorProfile:    c.AppArmorProfile,
	}

	return nil
}
func (daemon *Daemon) populateCommand(c *Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.hostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.Bridge.VirtualSwitchName,
				PortBindings: c.hostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.hostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.hostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.hostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.graph.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}
	for i := img; i != nil && err == nil; i, err = daemon.graph.GetParent(i) {
		lp, err := daemon.driver.Get(i.ID, "")
		if err != nil {
			return derr.ErrorCodeGetLayer.WithArgs(daemon.driver.String(), i.ID, err)
		}
		layerPaths = append(layerPaths, lp)
		err = daemon.driver.Put(i.ID)
		if err != nil {
			return derr.ErrorCodePutLayer.WithArgs(daemon.driver.String(), i.ID, err)
		}
	}
	m, err := daemon.driver.GetMetadata(c.ID)
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	c.command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.rootfsPath(),
			InitPath:      "/.dockerinit",
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.getMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.getProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   c.hostConfig.Isolation,
	}

	return nil
}