Example #1
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	var layerID layer.ChainID
	if container.ImageID != "" {
		img, err := daemon.imageStore.Get(container.ImageID)
		if err != nil {
			return err
		}
		layerID = img.RootFS.ChainID()
	}
	rwlayer, err := daemon.layerStore.Mount(container.ID, layerID, container.GetMountLabel(), daemon.setupInitLayer)
	if err != nil {
		return err
	}
	dir, err := rwlayer.Path()
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time.  This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.driver, container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	container.RWLayer = rwlayer
	return nil
}
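A hypothetical caller sketch (not part of the Docker source) showing how Mount might be paired with the Unmount referenced in the error path above, so the rootfs is released again if later setup fails. The helper name and signature are assumptions for illustration.

// withMountedRootfs is a hypothetical helper, not from the Docker source:
// mount the container rootfs, run fn against the mounted path, and unmount
// again if fn (or anything after the mount) fails.
func (daemon *Daemon) withMountedRootfs(c *container.Container, fn func(rootfs string) error) (retErr error) {
	if err := daemon.Mount(c); err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			daemon.Unmount(c)
		}
	}()
	return fn(c.BaseFS)
}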
Example #2
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
	dir, err := container.RWLayer.Mount(container.GetMountLabel())
	if err != nil {
		return err
	}
	logrus.Debugf("container mounted via layerStore: %v", dir)

	if container.BaseFS != dir {
		// The mount path reported by the graph driver should always be trusted on Windows, since the
		// volume path for a given mounted layer may change over time.  This should only be an error
		// on non-Windows operating systems.
		if container.BaseFS != "" && runtime.GOOS != "windows" {
			daemon.Unmount(container)
			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
				daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
		}
	}
	container.BaseFS = dir // TODO: combine these fields
	return nil
}
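A plausible counterpart to the version above, assuming the RWLayer type exposes an Unmount method alongside the Mount call used here. This is a sketch only, not the verbatim Docker implementation.

// Unmount releases the container's base filesystem mount. Sketch only: the
// RWLayer.Unmount method and the exact error handling are assumptions.
func (daemon *Daemon) Unmount(container *container.Container) error {
	if err := container.RWLayer.Unmount(); err != nil {
		logrus.Errorf("error unmounting container %s: %s", container.ID, err)
		return err
	}
	return nil
}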
// setupIpcDirs creates and mounts the container's /dev/shm and /dev/mqueue
// resources unless the container already has explicit mounts for them.
func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if !c.HasMountFor("/dev/shm") {
		shmPath, err := c.ShmResourcePath()
		if err != nil {
			return err
		}

		if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
			return err
		}

		shmSize := container.DefaultSHMSize
		if c.HostConfig.ShmSize != 0 {
			shmSize = c.HostConfig.ShmSize
		}
		shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
		if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
			return fmt.Errorf("mounting shm tmpfs: %s", err)
		}
		if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
			return err
		}
	}

	if !c.HasMountFor("/dev/mqueue") {
		mqueuePath, err := c.MqueueResourcePath()
		if err != nil {
			return err
		}

		if err := idtools.MkdirAllAs(mqueuePath, 0700, rootUID, rootGID); err != nil {
			return err
		}

		if err := syscall.Mount("mqueue", mqueuePath, "mqueue", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), ""); err != nil {
			return fmt.Errorf("mounting mqueue mqueue : %s", err)
		}
	}

	return nil
}
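The function above only sets the IPC mounts up; a teardown counterpart is not shown here. A minimal cleanup sketch, assuming the same resource-path helpers are used to locate the mounts (the function name and behavior are assumptions, not the Docker source):

// cleanupIpcMounts is a hypothetical sketch (not from the Docker source):
// lazily detach the per-container /dev/shm and /dev/mqueue mounts created by
// setupIpcDirs above.
func (daemon *Daemon) cleanupIpcMounts(c *container.Container) error {
	shmPath, err := c.ShmResourcePath()
	if err != nil {
		return err
	}
	mqueuePath, err := c.MqueueResourcePath()
	if err != nil {
		return err
	}
	for _, p := range []string{shmPath, mqueuePath} {
		// EINVAL typically means the path was not a mount point, e.g. because
		// the container used an explicit mount instead; treat that as done.
		if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil && err != syscall.EINVAL {
			return err
		}
	}
	return nil
}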
// populateCommand builds the execdriver.Command used to start the container,
// wiring up networking, IPC, PID/UTS namespaces, devices, ulimits, resource
// limits, and the process configuration.
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	var en *execdriver.Network
	if !c.Config.NetworkDisabled {
		en = &execdriver.Network{}
		if !daemon.execDriver.SupportsHooks() || c.HostConfig.NetworkMode.IsHost() {
			en.NamespacePath = c.NetworkSettings.SandboxKey
		}

		if c.HostConfig.NetworkMode.IsContainer() {
			nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer())
			if err != nil {
				return err
			}
			en.ContainerID = nc.ID
		}
	}

	ipc := &execdriver.Ipc{}
	var err error
	c.ShmPath, err = c.ShmResourcePath()
	if err != nil {
		return err
	}

	c.MqueuePath, err = c.MqueueResourcePath()
	if err != nil {
		return err
	}

	if c.HostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		ipc.ContainerID = ic.ID
		c.ShmPath = ic.ShmPath
		c.MqueuePath = ic.MqueuePath
	} else {
		ipc.HostIpc = c.HostConfig.IpcMode.IsHost()
		if ipc.HostIpc {
			if _, err := os.Stat("/dev/shm"); err != nil {
				return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
			}
			if _, err := os.Stat("/dev/mqueue"); err != nil {
				return fmt.Errorf("/dev/mqueue is not mounted, but must be for --ipc=host")
			}
			c.ShmPath = "/dev/shm"
			c.MqueuePath = "/dev/mqueue"
		}
	}

	pid := &execdriver.Pid{}
	pid.HostPid = c.HostConfig.PidMode.IsHost()

	uts := &execdriver.UTS{
		HostUTS: c.HostConfig.UTSMode.IsHost(),
	}

	// Build lists of devices allowed and created within the container.
	var userSpecifiedDevices []*configs.Device
	for _, deviceMapping := range c.HostConfig.Devices {
		devs, err := getDevicesFromPath(deviceMapping)
		if err != nil {
			return err
		}

		userSpecifiedDevices = append(userSpecifiedDevices, devs...)
	}

	allowedDevices := mergeDevices(configs.DefaultAllowedDevices, userSpecifiedDevices)

	autoCreatedDevices := mergeDevices(configs.DefaultAutoCreatedDevices, userSpecifiedDevices)

	var rlimits []*units.Rlimit
	ulimits := c.HostConfig.Ulimits

	// Merge ulimits with daemon defaults
	ulIdx := make(map[string]*units.Ulimit)
	for _, ul := range ulimits {
		ulIdx[ul.Name] = ul
	}
	for name, ul := range daemon.configStore.Ulimits {
		if _, exists := ulIdx[name]; !exists {
			ulimits = append(ulimits, ul)
		}
	}

	weightDevices, err := getBlkioWeightDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readBpsDevice, err := getBlkioReadBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeBpsDevice, err := getBlkioWriteBpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	readIOpsDevice, err := getBlkioReadIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	writeIOpsDevice, err := getBlkioWriteIOpsDevices(c.HostConfig)
	if err != nil {
		return err
	}

	for _, limit := range ulimits {
		rl, err := limit.GetRlimit()
		if err != nil {
			return err
		}
		rlimits = append(rlimits, rl)
	}

	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			Memory:            c.HostConfig.Memory,
			MemoryReservation: c.HostConfig.MemoryReservation,
			CPUShares:         c.HostConfig.CPUShares,
			BlkioWeight:       c.HostConfig.BlkioWeight,
		},
		MemorySwap:                   c.HostConfig.MemorySwap,
		KernelMemory:                 c.HostConfig.KernelMemory,
		CpusetCpus:                   c.HostConfig.CpusetCpus,
		CpusetMems:                   c.HostConfig.CpusetMems,
		CPUPeriod:                    c.HostConfig.CPUPeriod,
		CPUQuota:                     c.HostConfig.CPUQuota,
		Rlimits:                      rlimits,
		BlkioWeightDevice:            weightDevices,
		BlkioThrottleReadBpsDevice:   readBpsDevice,
		BlkioThrottleWriteBpsDevice:  writeBpsDevice,
		BlkioThrottleReadIOpsDevice:  readIOpsDevice,
		BlkioThrottleWriteIOpsDevice: writeIOpsDevice,
		OomKillDisable:               c.HostConfig.OomKillDisable,
		MemorySwappiness:             -1,
	}

	if c.HostConfig.MemorySwappiness != nil {
		resources.MemorySwappiness = *c.HostConfig.MemorySwappiness
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		Privileged: c.HostConfig.Privileged,
		User:       c.Config.User,
	}

	processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
	processConfig.Env = env

	remappedRoot := &execdriver.User{}
	rootUID, rootGID := daemon.GetRemappedUIDGID()
	if rootUID != 0 {
		remappedRoot.UID = rootUID
		remappedRoot.GID = rootGID
	}
	uidMap, gidMap := daemon.GetUIDGIDMaps()

	defaultCgroupParent := "/docker"
	if daemon.configStore.CgroupParent != "" {
		defaultCgroupParent = daemon.configStore.CgroupParent
	} else {
		for _, option := range daemon.configStore.ExecOptions {
			key, val, err := parsers.ParseKeyValueOpt(option)
			if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
				continue
			}
			if val == "systemd" {
				defaultCgroupParent = "system.slice"
			}
		}
	}
	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			InitPath:      "/.dockerinit",
			MountLabel:    c.GetMountLabel(),
			Network:       en,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
			Rootfs:        c.BaseFS,
			Resources:     resources,
			WorkingDir:    c.Config.WorkingDir,
		},
		AllowedDevices:     allowedDevices,
		AppArmorProfile:    c.AppArmorProfile,
		AutoCreatedDevices: autoCreatedDevices,
		CapAdd:             c.HostConfig.CapAdd.Slice(),
		CapDrop:            c.HostConfig.CapDrop.Slice(),
		CgroupParent:       defaultCgroupParent,
		GIDMapping:         gidMap,
		GroupAdd:           c.HostConfig.GroupAdd,
		Ipc:                ipc,
		OomScoreAdj:        c.HostConfig.OomScoreAdj,
		Pid:                pid,
		ReadonlyRootfs:     c.HostConfig.ReadonlyRootfs,
		RemappedRoot:       remappedRoot,
		SeccompProfile:     c.SeccompProfile,
		UIDMapping:         uidMap,
		UTS:                uts,
	}
	if c.HostConfig.CgroupParent != "" {
		c.Command.CgroupParent = c.HostConfig.CgroupParent
	}

	return nil
}
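The ulimit handling above merges per-container ulimits with the daemon defaults, with the container's values taking precedence. The same rule pulled out into a standalone helper, for illustration only (this helper is not part of the Docker source):

// mergeUlimits is an illustrative helper: container-specified ulimits win,
// and daemon defaults only fill in names the container did not set.
func mergeUlimits(containerUlimits []*units.Ulimit, daemonDefaults map[string]*units.Ulimit) []*units.Ulimit {
	merged := append([]*units.Ulimit(nil), containerUlimits...)
	seen := make(map[string]struct{}, len(containerUlimits))
	for _, ul := range containerUlimits {
		seen[ul.Name] = struct{}{}
	}
	for name, ul := range daemonDefaults {
		if _, ok := seen[name]; !ok {
			merged = append(merged, ul)
		}
	}
	return merged
}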
// populateCommand (Windows variant) builds the execdriver.Command for the
// container, resolving the image layer paths, isolation mode, and network
// interface settings.
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
	switch parts[0] {
	case "none":
	case "default", "": // empty string to support existing containers
		if !c.Config.NetworkDisabled {
			en.Interface = &execdriver.NetworkInterface{
				MacAddress:   c.Config.MacAddress,
				Bridge:       daemon.configStore.bridgeConfig.VirtualSwitchName,
				PortBindings: c.HostConfig.PortBindings,

				// TODO Windows. Include IPAddress. There already is a
				// property IPAddress on execDrive.CommonNetworkInterface,
				// but there is no CLI option in docker to pass through
				// an IPAddress on docker run.
			}
		}
	default:
		return derr.ErrorCodeInvalidNetworkMode.WithArgs(c.HostConfig.NetworkMode)
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return derr.ErrorCodeGetGraph.WithArgs(c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			// Re-slicing the truncated slice is safe here: its capacity still
			// covers the original DiffIDs, so each pass resolves the chain ID
			// for a progressively longer prefix of the layer chain.
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return derr.ErrorCodeGetLayer.WithArgs(err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return derr.ErrorCodeGetLayerMetadata.WithArgs(err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
	}

	return nil
}
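Both Windows variants parse the network mode with strings.SplitN. A small standalone illustration of what that split produces (not part of the daemon code): an empty mode still yields a single empty element, which is why the empty string is handled by the "default" case, and a limit of 2 keeps everything after the first ":" intact.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// "container:abc123" is a made-up example value.
	for _, mode := range []string{"", "default", "none", "container:abc123"} {
		fmt.Printf("%q -> %q\n", mode, strings.SplitN(mode, ":", 2))
	}
	// Output:
	// "" -> [""]
	// "default" -> ["default"]
	// "none" -> ["none"]
	// "container:abc123" -> ["container" "abc123"]
}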
// populateCommand (Windows variant with libnetwork support) additionally
// collects the HNS endpoint IDs of the networks the container is connected
// to before building the execdriver.Command.
func (daemon *Daemon) populateCommand(c *container.Container, env []string) error {
	en := &execdriver.Network{
		Interface: nil,
	}

	var epList []string

	// Connect all the libnetwork allocated networks to the container
	if c.NetworkSettings != nil {
		for n := range c.NetworkSettings.Networks {
			sn, err := daemon.FindNetwork(n)
			if err != nil {
				continue
			}

			ep, err := c.GetEndpointInNetwork(sn)
			if err != nil {
				continue
			}

			data, err := ep.DriverInfo()
			if err != nil {
				continue
			}
			if data["hnsid"] != nil {
				epList = append(epList, data["hnsid"].(string))
			}
		}
	}

	if daemon.netController == nil {
		parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2)
		switch parts[0] {
		case "none":
		case "default", "": // empty string to support existing containers
			if !c.Config.NetworkDisabled {
				en.Interface = &execdriver.NetworkInterface{
					MacAddress:   c.Config.MacAddress,
					Bridge:       daemon.configStore.bridgeConfig.Iface,
					PortBindings: c.HostConfig.PortBindings,

					// TODO Windows. Include IPAddress. There already is a
					// property IPAddress on execDrive.CommonNetworkInterface,
					// but there is no CLI option in docker to pass through
					// an IPAddress on docker run.
				}
			}
		default:
			return fmt.Errorf("invalid network mode: %s", c.HostConfig.NetworkMode)
		}
	}

	// TODO Windows. More resource controls to be implemented later.
	resources := &execdriver.Resources{
		CommonResources: execdriver.CommonResources{
			CPUShares: c.HostConfig.CPUShares,
		},
	}

	processConfig := execdriver.ProcessConfig{
		CommonProcessConfig: execdriver.CommonProcessConfig{
			Entrypoint: c.Path,
			Arguments:  c.Args,
			Tty:        c.Config.Tty,
		},
		ConsoleSize: c.HostConfig.ConsoleSize,
	}

	processConfig.Env = env

	var layerPaths []string
	img, err := daemon.imageStore.Get(c.ImageID)
	if err != nil {
		return fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err)
	}

	if img.RootFS != nil && img.RootFS.Type == "layers+base" {
		max := len(img.RootFS.DiffIDs)
		for i := 0; i <= max; i++ {
			img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
			path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID())
			if err != nil {
				return fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err)
			}
			// Reverse order, expecting parent most first
			layerPaths = append([]string{path}, layerPaths...)
		}
	}

	m, err := c.RWLayer.Metadata()
	if err != nil {
		return fmt.Errorf("Failed to get layer metadata - %s", err)
	}
	layerFolder := m["dir"]

	var hvPartition bool
	// Work out the isolation (whether it is a hypervisor partition)
	if c.HostConfig.Isolation.IsDefault() {
		// Not specified by caller. Take daemon default
		hvPartition = windows.DefaultIsolation.IsHyperV()
	} else {
		// Take value specified by caller
		hvPartition = c.HostConfig.Isolation.IsHyperV()
	}

	c.Command = &execdriver.Command{
		CommonCommand: execdriver.CommonCommand{
			ID:            c.ID,
			Rootfs:        c.BaseFS,
			WorkingDir:    c.Config.WorkingDir,
			Network:       en,
			MountLabel:    c.GetMountLabel(),
			Resources:     resources,
			ProcessConfig: processConfig,
			ProcessLabel:  c.GetProcessLabel(),
		},
		FirstStart:  !c.HasBeenStartedBefore,
		LayerFolder: layerFolder,
		LayerPaths:  layerPaths,
		Hostname:    c.Config.Hostname,
		Isolation:   string(c.HostConfig.Isolation),
		ArgsEscaped: c.Config.ArgsEscaped,
		HvPartition: hvPartition,
		EpList:      epList,
	}

	return nil
}
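The endpoint walk at the top of this variant collects the Windows HNS endpoint IDs for every network the container is attached to. The same logic pulled out into a helper, as a refactoring sketch only (this helper does not exist in the source):

// hnsEndpoints is a refactoring sketch of the loop above: collect the HNS
// endpoint ID reported by each network driver, skipping any network or
// endpoint that cannot be resolved.
func (daemon *Daemon) hnsEndpoints(c *container.Container) []string {
	var ids []string
	if c.NetworkSettings == nil {
		return ids
	}
	for name := range c.NetworkSettings.Networks {
		sn, err := daemon.FindNetwork(name)
		if err != nil {
			continue
		}
		ep, err := c.GetEndpointInNetwork(sn)
		if err != nil {
			continue
		}
		data, err := ep.DriverInfo()
		if err != nil {
			continue
		}
		if id, ok := data["hnsid"].(string); ok {
			ids = append(ids, id)
		}
	}
	return ids
}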
// setupIpcDirs resolves the container's /dev/shm path according to its IPC
// mode: shared from another container, taken from the host, or mounted as a
// private tmpfs.
func (daemon *Daemon) setupIpcDirs(c *container.Container) error {
	var err error

	c.ShmPath, err = c.ShmResourcePath()
	if err != nil {
		return err
	}

	if c.HostConfig.IpcMode.IsContainer() {
		ic, err := daemon.getIpcContainer(c)
		if err != nil {
			return err
		}
		c.ShmPath = ic.ShmPath
	} else if c.HostConfig.IpcMode.IsHost() {
		if _, err := os.Stat("/dev/shm"); err != nil {
			return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host")
		}
		c.ShmPath = "/dev/shm"
	} else {
		rootUID, rootGID := daemon.GetRemappedUIDGID()
		if !c.HasMountFor("/dev/shm") {
			shmPath, err := c.ShmResourcePath()
			if err != nil {
				return err
			}

			if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil {
				return err
			}

			shmSize := container.DefaultSHMSize
			if c.HostConfig.ShmSize != 0 {
				shmSize = c.HostConfig.ShmSize
			}
			shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10)
			if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil {
				return fmt.Errorf("mounting shm tmpfs: %s", err)
			}
			if err := os.Chown(shmPath, rootUID, rootGID); err != nil {
				return err
			}
		}

	}

	return nil
}