Example #1
func (d *Driver) setupRemappedRoot(container *configs.Config, c *execdriver.Command) error {
	if c.RemappedRoot.UID == 0 {
		container.Namespaces.Remove(configs.NEWUSER)
		return nil
	}

	// convert the Docker daemon id map to the libcontainer variant of the same struct
	// this keeps us from having to import libcontainer code across Docker client + daemon packages
	cuidMaps := []configs.IDMap{}
	cgidMaps := []configs.IDMap{}
	for _, idMap := range c.UIDMapping {
		cuidMaps = append(cuidMaps, configs.IDMap(idMap))
	}
	for _, idMap := range c.GIDMapping {
		cgidMaps = append(cgidMaps, configs.IDMap(idMap))
	}
	container.UidMappings = cuidMaps
	container.GidMappings = cgidMaps

	for _, node := range container.Devices {
		node.Uid = uint32(c.RemappedRoot.UID)
		node.Gid = uint32(c.RemappedRoot.GID)
	}
	// TODO: until a kernel/mount solution exists for handling remount in a user namespace,
	// we must clear the readonly flag for the cgroups mount (@mrunalp concurs)
	for i := range container.Mounts {
		if container.Mounts[i].Device == "cgroup" {
			container.Mounts[i].Flags &= ^syscall.MS_RDONLY
		}
	}

	return nil
}
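The configs.IDMap(idMap) conversion above compiles only because the daemon-side ID-map struct and the libcontainer one have identical field names, types, and ordering. A minimal standalone sketch of that Go conversion rule, with hypothetical daemonIDMap and libIDMap types standing in for the real structs:

package main

import "fmt"

// Hypothetical stand-ins for the Docker daemon and libcontainer ID-map structs.
type daemonIDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

type libIDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

func main() {
	d := daemonIDMap{ContainerID: 0, HostID: 100000, Size: 65536}
	// A direct type conversion is allowed because the underlying struct
	// types are identical, so no field-by-field copy is needed.
	l := libIDMap(d)
	fmt.Printf("%+v\n", l)
}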
Example #2
func createLibcontainerNetwork(spec *LinuxSpec, config *configs.Config) error {
	for _, network := range spec.Networks {
		n := &configs.Network{
			Type:              network.Type,
			Name:              network.Name,
			Bridge:            network.Bridge,
			MacAddress:        network.MacAddress,
			Address:           network.Address,
			Gateway:           network.Gateway,
			Mtu:               network.Mtu,
			TxQueueLen:        network.TxQueueLen,
			IPv6Address:       network.IPv6Address,
			IPv6Gateway:       network.IPv6Gateway,
			HostInterfaceName: network.HostInterfaceName,
			HairpinMode:       network.HairpinMode,
		}
		config.Networks = append(config.Networks, n)
	}
	for _, route := range spec.Routes {
		r := &configs.Route{
			Destination:   route.Destination,
			Source:        route.Source,
			Gateway:       route.Gateway,
			InterfaceName: route.InterfaceName,
		}
		config.Routes = append(config.Routes, r)
	}
	return nil
}
Example #3
func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
	if l.Root == "" {
		return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
	}
	if err := l.validateID(id); err != nil {
		return nil, err
	}
	if err := l.Validator.Validate(config); err != nil {
		return nil, newGenericError(err, ConfigInvalid)
	}
	uid, err := config.HostUID()
	if err != nil {
		return nil, newGenericError(err, SystemError)
	}
	gid, err := config.HostGID()
	if err != nil {
		return nil, newGenericError(err, SystemError)
	}
	containerRoot := filepath.Join(l.Root, id)
	if _, err := os.Stat(containerRoot); err == nil {
		return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
	} else if !os.IsNotExist(err) {
		return nil, newGenericError(err, SystemError)
	}
	if err := os.MkdirAll(containerRoot, 0711); err != nil {
		return nil, newGenericError(err, SystemError)
	}
	if err := os.Chown(containerRoot, uid, gid); err != nil {
		return nil, newGenericError(err, SystemError)
	}
	fifoName := filepath.Join(containerRoot, execFifoFilename)
	oldMask := syscall.Umask(0000)
	if err := syscall.Mkfifo(fifoName, 0622); err != nil {
		syscall.Umask(oldMask)
		return nil, newGenericError(err, SystemError)
	}
	syscall.Umask(oldMask)
	if err := os.Chown(fifoName, uid, gid); err != nil {
		return nil, newGenericError(err, SystemError)
	}
	c := &linuxContainer{
		id:            id,
		root:          containerRoot,
		config:        config,
		initPath:      l.InitPath,
		initArgs:      l.InitArgs,
		criuPath:      l.CriuPath,
		cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
	}
	c.state = &stoppedState{c: c}
	return c, nil
}
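For context, Create is normally reached through a factory constructor. A minimal usage sketch, assuming the libcontainer.New and libcontainer.Cgroupfs API of the runc/libcontainer tree this example comes from; the state root, container id, and config below are placeholders, and a real config would also need namespaces, mounts, and cgroup settings:

package main

import (
	"log"

	"github.com/opencontainers/runc/libcontainer"
	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	// The first argument is the state root (l.Root above); Cgroupfs selects
	// the plain cgroupfs cgroup manager.
	factory, err := libcontainer.New("/run/mycontainers", libcontainer.Cgroupfs)
	if err != nil {
		log.Fatal(err)
	}
	container, err := factory.Create("demo", &configs.Config{Rootfs: "/tmp/rootfs"})
	if err != nil {
		log.Fatal(err)
	}
	defer container.Destroy()
}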
Example #4
func (d *Driver) setPrivileged(container *configs.Config) (err error) {
	container.Capabilities = execdriver.GetAllCapabilities()
	container.Cgroups.AllowAllDevices = true

	hostDevices, err := devices.HostDevices()
	if err != nil {
		return err
	}
	container.Devices = hostDevices

	if apparmor.IsEnabled() {
		container.AppArmorProfile = "unconfined"
	}
	return nil
}
Example #5
func createHooks(rspec *specs.LinuxRuntimeSpec, config *configs.Config) {
	config.Hooks = &configs.Hooks{}
	for _, h := range rspec.Hooks.Prestart {
		cmd := configs.Command{
			Path: h.Path,
			Args: h.Args,
			Env:  h.Env,
		}
		config.Hooks.Prestart = append(config.Hooks.Prestart, configs.NewCommandHook(cmd))
	}
	for _, h := range rspec.Hooks.Poststart {
		cmd := configs.Command{
			Path: h.Path,
			Args: h.Args,
			Env:  h.Env,
		}
		config.Hooks.Poststart = append(config.Hooks.Poststart, configs.NewCommandHook(cmd))
	}
	for _, h := range rspec.Hooks.Poststop {
		cmd := configs.Command{
			Path: h.Path,
			Args: h.Args,
			Env:  h.Env,
		}
		config.Hooks.Poststop = append(config.Hooks.Poststop, configs.NewCommandHook(cmd))
	}
}
Example #6
func (d *Driver) createUTS(container *configs.Config, c *execdriver.Command) error {
	if c.UTS.HostUTS {
		container.Namespaces.Remove(configs.NEWUTS)
		container.Hostname = ""
		return nil
	}

	return nil
}
Example #7
func createDevices(spec *LinuxSpec, config *configs.Config) error {
	for _, name := range spec.Devices {
		d, err := devices.DeviceFromPath(filepath.Join("/dev", name), "rwm")
		if err != nil {
			return err
		}
		config.Devices = append(config.Devices, d)
	}
	return nil
}
Example #8
func addBindMount(config *configs.Config, src, dest string, writeable bool) {
	flags := syscall.MS_BIND | syscall.MS_REC
	if !writeable {
		flags |= syscall.MS_RDONLY
	}
	config.Mounts = append(config.Mounts, &configs.Mount{
		Source:      src,
		Destination: dest,
		Device:      "bind",
		Flags:       flags,
	})
}
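A short usage note for the helper above: the writeable flag only toggles MS_RDONLY, so read-only bind mounts are the default posture. For instance (paths are illustrative):

// Expose the host's /var/lib/app read-only at /data inside the container.
addBindMount(config, "/var/lib/app", "/data", false)

// Expose a writable scratch directory.
addBindMount(config, "/tmp/scratch", "/scratch", true)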
Example #9
func setupUserNamespace(spec *LinuxSpec, config *configs.Config) error {
	if len(spec.UserMapping) == 0 {
		return nil
	}
	config.Namespaces.Add(configs.NEWUSER, "")
	mappings := make(map[string][]configs.IDMap)
	for k, v := range spec.UserMapping {
		mappings[k] = append(mappings[k], configs.IDMap{
			ContainerID: v.From,
			HostID:      v.To,
			Size:        v.Count,
		})
	}
	config.UidMappings = mappings["uid"]
	config.GidMappings = mappings["gid"]
	rootUid, err := config.HostUID()
	if err != nil {
		return err
	}
	rootGid, err := config.HostGID()
	if err != nil {
		return err
	}
	for _, node := range config.Devices {
		node.Uid = uint32(rootUid)
		node.Gid = uint32(rootGid)
	}
	return nil
}
Example #10
func (r *libcontainerRuntime) setupUserNamespace(spec *specs.LinuxRuntimeSpec, config *configs.Config) error {
	if len(spec.Linux.UIDMappings) == 0 {
		return nil
	}
	config.Namespaces.Add(configs.NEWUSER, "")
	create := func(m specs.IDMapping) configs.IDMap {
		return configs.IDMap{
			HostID:      int(m.HostID),
			ContainerID: int(m.ContainerID),
			Size:        int(m.Size),
		}
	}
	for _, m := range spec.Linux.UIDMappings {
		config.UidMappings = append(config.UidMappings, create(m))
	}
	for _, m := range spec.Linux.GIDMappings {
		config.GidMappings = append(config.GidMappings, create(m))
	}
	rootUID, err := config.HostUID()
	if err != nil {
		return err
	}
	rootGID, err := config.HostGID()
	if err != nil {
		return err
	}
	for _, node := range config.Devices {
		node.Uid = uint32(rootUID)
		node.Gid = uint32(rootGID)
	}
	return nil
}
Example #11
func setupUserNamespace(spec *specs.Spec, config *configs.Config) error {
	if len(spec.Linux.UIDMappings) == 0 {
		return nil
	}
	// do not override the specified user namespace path
	if config.Namespaces.PathOf(configs.NEWUSER) == "" {
		config.Namespaces.Add(configs.NEWUSER, "")
	}
	create := func(m specs.IDMapping) configs.IDMap {
		return configs.IDMap{
			HostID:      int(m.HostID),
			ContainerID: int(m.ContainerID),
			Size:        int(m.Size),
		}
	}
	for _, m := range spec.Linux.UIDMappings {
		config.UidMappings = append(config.UidMappings, create(m))
	}
	for _, m := range spec.Linux.GIDMappings {
		config.GidMappings = append(config.GidMappings, create(m))
	}
	rootUID, err := config.HostUID()
	if err != nil {
		return err
	}
	rootGID, err := config.HostGID()
	if err != nil {
		return err
	}
	for _, node := range config.Devices {
		node.Uid = uint32(rootUID)
		node.Gid = uint32(rootGID)
	}
	return nil
}
Example #12
func (d *Driver) setupRlimits(container *configs.Config, c *execdriver.Command) {
	if c.Resources == nil {
		return
	}

	for _, rlimit := range c.Resources.Rlimits {
		container.Rlimits = append(container.Rlimits, configs.Rlimit{
			Type: rlimit.Type,
			Hard: rlimit.Hard,
			Soft: rlimit.Soft,
		})
	}
}
Example #13
func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) error {
	userMounts := make(map[string]struct{})
	for _, m := range c.Mounts {
		userMounts[m.Destination] = struct{}{}
	}

	// Filter out mounts that are overridden by user-supplied mounts
	var defaultMounts []*configs.Mount
	_, mountDev := userMounts["/dev"]
	for _, m := range container.Mounts {
		if _, ok := userMounts[m.Destination]; !ok {
			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
				container.Devices = nil
				continue
			}
			defaultMounts = append(defaultMounts, m)
		}
	}
	container.Mounts = defaultMounts

	for _, m := range c.Mounts {
		flags := syscall.MS_BIND | syscall.MS_REC
		if !m.Writable {
			flags |= syscall.MS_RDONLY
		}
		if m.Slave {
			flags |= syscall.MS_SLAVE
		}

		container.Mounts = append(container.Mounts, &configs.Mount{
			Source:      m.Source,
			Destination: m.Destination,
			Device:      "bind",
			Flags:       flags,
		})
	}
	return nil
}
Example #14
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {
	var term execdriver.Terminal
	var err error

	if processConfig.Tty {
		rootuid, err := container.HostUID()
		if err != nil {
			return err
		}
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return err
		}
		term, err = NewTtyConsole(cons, pipes)
		if err != nil {
			return err
		}
	} else {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr
		r, w, err := os.Pipe()
		if err != nil {
			return err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		term = &execdriver.StdConsole{}
	}
	if err != nil {
		return err
	}
	processConfig.Terminal = term
	return nil
}
Example #15
func createHooks(rspec *specs.Spec, config *configs.Config) {
	config.Hooks = &configs.Hooks{}
	for _, h := range rspec.Hooks.Prestart {
		cmd := createCommandHook(h)
		config.Hooks.Prestart = append(config.Hooks.Prestart, configs.NewCommandHook(cmd))
	}
	for _, h := range rspec.Hooks.Poststart {
		cmd := createCommandHook(h)
		config.Hooks.Poststart = append(config.Hooks.Poststart, configs.NewCommandHook(cmd))
	}
	for _, h := range rspec.Hooks.Poststop {
		cmd := createCommandHook(h)
		config.Hooks.Poststop = append(config.Hooks.Poststop, configs.NewCommandHook(cmd))
	}
}
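Example #15 depends on a createCommandHook helper that is not shown in this listing. Judging from the fields copied in Example #5, a plausible minimal sketch looks like this (the real helper may also carry a hook timeout, which is omitted here):

// Hypothetical sketch of the helper used in Example #15: build a
// configs.Command from a spec hook, mirroring the fields used in Example #5.
func createCommandHook(h specs.Hook) configs.Command {
	return configs.Command{
		Path: h.Path,
		Args: h.Args,
		Env:  h.Env,
	}
}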
Example #16
func (d *Driver) createNetwork(container *configs.Config, c *execdriver.Command, hooks execdriver.Hooks) error {
	if c.Network == nil {
		return nil
	}
	if c.Network.ContainerID != "" {
		d.Lock()
		active := d.activeContainers[c.Network.ContainerID]
		d.Unlock()

		if active == nil {
			return fmt.Errorf("%s is not a valid running container to join", c.Network.ContainerID)
		}

		state, err := active.State()
		if err != nil {
			return err
		}

		container.Namespaces.Add(configs.NEWNET, state.NamespacePaths[configs.NEWNET])
		return nil
	}

	if c.Network.NamespacePath != "" {
		container.Namespaces.Add(configs.NEWNET, c.Network.NamespacePath)
		return nil
	}
	// only set up prestart hook if the namespace path is not set (this should be
	// all cases *except* for --net=host shared networking)
	container.Hooks = &configs.Hooks{
		Prestart: []configs.Hook{
			configs.NewFunctionHook(func(s configs.HookState) error {
				if len(hooks.PreStart) > 0 {
					for _, fnHook := range hooks.PreStart {
						// A closed channel for OOM is returned here as it will be
						// non-blocking and return the correct result when read.
						chOOM := make(chan struct{})
						close(chOOM)
						if err := fnHook(&c.ProcessConfig, s.Pid, chOOM); err != nil {
							return err
						}
					}
				}
				return nil
			}),
		},
	}
	return nil
}
Example #17
func convertOldConfigToNew(config v1Config) *configs.Config {
	var (
		result configs.Config
		old    *v1Cgroup = config.Cgroup
	)
	result.Rootfs = config.Config.Rootfs
	result.Hostname = config.Config.Hostname
	result.Namespaces = config.Config.Namespaces
	result.Capabilities = config.Config.Capabilities
	result.Networks = config.Config.Networks
	result.Routes = config.Config.Routes

	var newCgroup = &configs.Cgroup{
		Name:   old.Name,
		Parent: old.Parent,
		Resources: &configs.Resources{
			AllowAllDevices:   old.Resources.AllowAllDevices,
			AllowedDevices:    old.Resources.AllowedDevices,
			DeniedDevices:     old.Resources.DeniedDevices,
			Memory:            old.Resources.Memory,
			MemoryReservation: old.Resources.MemoryReservation,
			MemorySwap:        old.Resources.MemorySwap,
			KernelMemory:      old.Resources.KernelMemory,
			CpuShares:         old.Resources.CpuShares,
			CpuQuota:          old.Resources.CpuQuota,
			CpuPeriod:         old.Resources.CpuPeriod,
			CpuRtRuntime:      old.Resources.CpuRtRuntime,
			CpuRtPeriod:       old.Resources.CpuRtPeriod,
			CpusetCpus:        old.Resources.CpusetCpus,
			CpusetMems:        old.Resources.CpusetMems,
			BlkioWeight:       old.Resources.BlkioWeight,
			BlkioLeafWeight:   old.Resources.BlkioLeafWeight,
			Freezer:           old.Resources.Freezer,
			HugetlbLimit:      old.Resources.HugetlbLimit,
			OomKillDisable:    old.Resources.OomKillDisable,
			MemorySwappiness:  old.Resources.MemorySwappiness,
			NetPrioIfpriomap:  old.Resources.NetPrioIfpriomap,
			NetClsClassid:     old.Resources.NetClsClassid,
		},
	}

	result.Cgroups = newCgroup

	return &result
}
Example #18
func (r *libcontainerRuntime) createDevices(spec *specs.LinuxRuntimeSpec, config *configs.Config) error {
	for _, d := range spec.Linux.Devices {
		device := &configs.Device{
			Type:        d.Type,
			Path:        d.Path,
			Major:       d.Major,
			Minor:       d.Minor,
			Permissions: d.Permissions,
			FileMode:    d.FileMode,
			Uid:         d.UID,
			Gid:         d.GID,
		}
		config.Devices = append(config.Devices, device)
	}
	return nil
}
Example #19
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes, wg *sync.WaitGroup) ([]io.WriteCloser, error) {

	writers := []io.WriteCloser{}

	rootuid, err := container.HostUID()
	if err != nil {
		return writers, err
	}

	if processConfig.Tty {
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return writers, err
		}
		term, err := NewTtyConsole(cons, pipes)
		if err != nil {
			return writers, err
		}
		processConfig.Terminal = term
		return writers, nil
	}
	// not a tty--set up stdio pipes
	term := &execdriver.StdConsole{}
	processConfig.Terminal = term

	// if we are not in a user namespace, there is no reason to go through
	// the hassle of setting up os-level pipes with proper (remapped) ownership
	// so we will do the prior shortcut for non-userns containers
	if rootuid == 0 {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr

		r, w, err := os.Pipe()
		if err != nil {
			return writers, err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		return writers, nil
	}

	// if we have user namespaces enabled (rootuid != 0), we will set
	// up os pipes for stderr, stdout, stdin so we can chown them to
	// the proper ownership to allow for proper access to the underlying
	// fds
	var fds []uintptr

	copyPipes := func(out io.Writer, in io.ReadCloser) {
		defer wg.Done()
		io.Copy(out, in)
		in.Close()
	}

	//setup stdout
	r, w, err := os.Pipe()
	if err != nil {
		w.Close()
		return writers, err
	}
	writers = append(writers, w)
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stdout != nil {
		wg.Add(1)
		go copyPipes(pipes.Stdout, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stdout = w

	//setup stderr
	r, w, err = os.Pipe()
	if err != nil {
		w.Close()
		return writers, err
	}
	writers = append(writers, w)
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stderr != nil {
		wg.Add(1)
		go copyPipes(pipes.Stderr, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stderr = w

	//setup stdin
	r, w, err = os.Pipe()
	if err != nil {
		r.Close()
		return writers, err
	}
	fds = append(fds, r.Fd(), w.Fd())
	if pipes.Stdin != nil {
		go func() {
			io.Copy(w, pipes.Stdin)
			w.Close()
		}()
		p.Stdin = r
	}
	for _, fd := range fds {
		if err := syscall.Fchown(int(fd), rootuid, rootuid); err != nil {
			return writers, fmt.Errorf("Failed to chown pipes fd: %v", err)
		}
	}
	return writers, nil
}
Example #20
func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) error {
	userMounts := make(map[string]struct{})
	for _, m := range c.Mounts {
		userMounts[m.Destination] = struct{}{}
	}

	// Filter out mounts that are overridden by user-supplied mounts
	var defaultMounts []*configs.Mount
	_, mountDev := userMounts["/dev"]
	for _, m := range container.Mounts {
		if _, ok := userMounts[m.Destination]; !ok {
			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
				container.Devices = nil
				continue
			}
			defaultMounts = append(defaultMounts, m)
		}
	}
	container.Mounts = defaultMounts

	for _, m := range c.Mounts {
		for _, cm := range container.Mounts {
			if cm.Destination == m.Destination {
				return derr.ErrorCodeMountDup.WithArgs(m.Destination)
			}
		}

		if m.Source == "tmpfs" {
			var (
				data  = "size=65536k"
				flags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
				err   error
			)
			fulldest := filepath.Join(c.Rootfs, m.Destination)
			if m.Data != "" {
				flags, data, err = mount.ParseTmpfsOptions(m.Data)
				if err != nil {
					return err
				}
			}
			container.Mounts = append(container.Mounts, &configs.Mount{
				Source:        m.Source,
				Destination:   m.Destination,
				Data:          data,
				Device:        "tmpfs",
				Flags:         flags,
				PremountCmds:  genTmpfsPremountCmd(c.TmpDir, fulldest, m.Destination),
				PostmountCmds: genTmpfsPostmountCmd(c.TmpDir, fulldest, m.Destination),
			})
			continue
		}
		flags := syscall.MS_BIND | syscall.MS_REC
		if !m.Writable {
			flags |= syscall.MS_RDONLY
		}
		if m.Slave {
			flags |= syscall.MS_SLAVE
		}

		container.Mounts = append(container.Mounts, &configs.Mount{
			Source:      m.Source,
			Destination: m.Destination,
			Device:      "bind",
			Flags:       flags,
		})
	}
	return nil
}
Example #21
// SetRootPropagation sets the root mount propagation mode.
func SetRootPropagation(config *configs.Config, propagation int) {
	config.RootPropagation = propagation
}
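Example #23 below shows how this setter is used in practice; the shortest form is simply choosing a constant from the mount package, for example:

// Make the container's root mount a recursive slave so that host mount
// events propagate in but container mounts do not leak out
// (the same call appears in Example #23).
execdriver.SetRootPropagation(container, mount.RSLAVE)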
Example #22
func setupPipes(container *configs.Config, processConfig *execdriver.ProcessConfig, p *libcontainer.Process, pipes *execdriver.Pipes) error {

	rootuid, err := container.HostUID()
	if err != nil {
		return err
	}

	if processConfig.Tty {
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return err
		}
		term, err := NewTtyConsole(cons, pipes)
		if err != nil {
			return err
		}
		processConfig.Terminal = term
		return nil
	}
	// not a tty--set up stdio pipes
	term := &execdriver.StdConsole{}
	processConfig.Terminal = term

	// if we are not in a user namespace, there is no reason to go through
	// the hassle of setting up os-level pipes with proper (remapped) ownership
	// so we will do the prior shortcut for non-userns containers
	if rootuid == 0 {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr

		r, w, err := os.Pipe()
		if err != nil {
			return err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		return nil
	}

	// if we have user namespaces enabled (rootuid != 0), we will set
	// up os pipes for stderr, stdout, stdin so we can chown them to
	// the proper ownership to allow for proper access to the underlying
	// fds
	var fds []int

	//setup stdout
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stdout != nil {
		go io.Copy(pipes.Stdout, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stdout = w

	//setup stderr
	r, w, err = os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stderr != nil {
		go io.Copy(pipes.Stderr, r)
	}
	term.Closers = append(term.Closers, r)
	p.Stderr = w

	//setup stdin
	r, w, err = os.Pipe()
	if err != nil {
		return err
	}
	fds = append(fds, int(r.Fd()), int(w.Fd()))
	if pipes.Stdin != nil {
		go func() {
			io.Copy(w, pipes.Stdin)
			w.Close()
		}()
		p.Stdin = r
	}
	for _, fd := range fds {
		if err := syscall.Fchown(fd, rootuid, rootuid); err != nil {
			return fmt.Errorf("Failed to chown pipes fd: %v", err)
		}
	}
	return nil
}
Example #23
func (d *Driver) setupMounts(container *configs.Config, c *execdriver.Command) error {
	userMounts := make(map[string]struct{})
	for _, m := range c.Mounts {
		userMounts[m.Destination] = struct{}{}
	}

	// Filter out mounts that are overridden by user-supplied mounts
	var defaultMounts []*configs.Mount
	_, mountDev := userMounts["/dev"]
	for _, m := range container.Mounts {
		if _, ok := userMounts[m.Destination]; !ok {
			if mountDev && strings.HasPrefix(m.Destination, "/dev/") {
				container.Devices = nil
				continue
			}
			defaultMounts = append(defaultMounts, m)
		}
	}
	container.Mounts = defaultMounts

	mountPropagationMap := map[string]int{
		"private":  mount.PRIVATE,
		"rprivate": mount.RPRIVATE,
		"shared":   mount.SHARED,
		"rshared":  mount.RSHARED,
		"slave":    mount.SLAVE,
		"rslave":   mount.RSLAVE,
	}

	for _, m := range c.Mounts {
		for _, cm := range container.Mounts {
			if cm.Destination == m.Destination {
				return derr.ErrorCodeMountDup.WithArgs(m.Destination)
			}
		}

		if m.Source == "tmpfs" {
			var (
				data  = "size=65536k"
				flags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
				err   error
			)
			if m.Data != "" {
				flags, data, err = mount.ParseTmpfsOptions(m.Data)
				if err != nil {
					return err
				}
			}
			container.Mounts = append(container.Mounts, &configs.Mount{
				Source:           m.Source,
				Destination:      m.Destination,
				Data:             data,
				Device:           "tmpfs",
				Flags:            flags,
				PropagationFlags: []int{mountPropagationMap[volume.DefaultPropagationMode]},
			})
			continue
		}
		flags := syscall.MS_BIND | syscall.MS_REC
		var pFlag int
		if !m.Writable {
			flags |= syscall.MS_RDONLY
		}

		// Determine the RootPropagation setting based on the volume's
		// propagation properties. If a volume is shared, keep root
		// propagation shared. This should work for slave and private volumes too.
		//
		// For slave volumes, it can be either [r]shared/[r]slave.
		//
		// For private volumes any root propagation value should work.

		pFlag = mountPropagationMap[m.Propagation]
		if pFlag == mount.SHARED || pFlag == mount.RSHARED {
			if err := ensureShared(m.Source); err != nil {
				return err
			}
			rootpg := container.RootPropagation
			if rootpg != mount.SHARED && rootpg != mount.RSHARED {
				execdriver.SetRootPropagation(container, mount.SHARED)
			}
		} else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE {
			if err := ensureSharedOrSlave(m.Source); err != nil {
				return err
			}
			rootpg := container.RootPropagation
			if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE {
				execdriver.SetRootPropagation(container, mount.RSLAVE)
			}
		}

		mount := &configs.Mount{
			Source:      m.Source,
			Destination: m.Destination,
			Device:      "bind",
			Flags:       flags,
		}

		if pFlag != 0 {
			mount.PropagationFlags = []int{pFlag}
		}

		container.Mounts = append(container.Mounts, mount)
	}

	checkResetVolumePropagation(container)
	return nil
}
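The ensureShared and ensureSharedOrSlave helpers called above are not part of this listing. A hypothetical, self-contained sketch of the shared-mount check, parsing /proc/self/mountinfo directly (it needs only bufio, fmt, os, and strings; the real Docker helpers go through the daemon's mount package rather than this hand-rolled parser):

// ensureShared verifies that the mount backing path carries a "shared:N" peer
// group in its mountinfo optional fields. Hypothetical sketch only; the path
// matching is a naive prefix check, while the real code canonicalizes paths.
func ensureShared(path string) error {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return err
	}
	defer f.Close()

	bestMount, bestOptional := "", ""
	s := bufio.NewScanner(f)
	for s.Scan() {
		fields := strings.Fields(s.Text())
		if len(fields) < 7 {
			continue
		}
		// fields[4] is the mount point; optional fields run from index 6
		// until the "-" separator.
		mountPoint := fields[4]
		if strings.HasPrefix(path, mountPoint) && len(mountPoint) > len(bestMount) {
			optional := ""
			for _, fld := range fields[6:] {
				if fld == "-" {
					break
				}
				optional += fld + " "
			}
			bestMount, bestOptional = mountPoint, optional
		}
	}
	if err := s.Err(); err != nil {
		return err
	}
	if !strings.Contains(bestOptional, "shared:") {
		return fmt.Errorf("path %s is mounted on %s but it is not a shared mount", path, bestMount)
	}
	return nil
}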
Example #24
// TODO(vmarmol): Deprecate over time as old Dockers are phased out.
func ReadConfig(dockerRoot, dockerRun, containerID string) (*configs.Config, error) {
	// Try using the new config if it is available.
	configPath := configPath(dockerRun, containerID)
	if utils.FileExists(configPath) {
		out, err := ioutil.ReadFile(configPath)
		if err != nil {
			return nil, err
		}

		var state libcontainer.State
		if err = json.Unmarshal(out, &state); err != nil {
			if _, ok := err.(*json.UnmarshalTypeError); ok {
				// Since some fields in the Cgroup struct changed, unmarshalling into libcontainer.State will fail.
				// The failure is caused by a change in runc (https://github.com/opencontainers/runc/commit/c6e406af243fab0c9636539c1cb5f4d60fe0787f).
				// If we hit an UnmarshalTypeError, try unmarshalling again into the v1State struct and convert it.
				var state v1State
				err2 := json.Unmarshal(out, &state)
				if err2 != nil {
					return nil, err
				}
				return convertOldConfigToNew(state.Config), nil
			} else {
				return nil, err
			}
		}
		return &state.Config, nil
	}

	// Fall back to reading the old config, which consists of the state and config files.
	oldConfigPath := oldConfigPath(dockerRoot, containerID)
	out, err := ioutil.ReadFile(oldConfigPath)
	if err != nil {
		return nil, err
	}

	// Try reading the preAPIConfig.
	var config preAPIConfig
	err = json.Unmarshal(out, &config)
	if err != nil {
		// Try to parse the old pre-API config. The main difference is that namespaces used to be a map and are now a slice of structs.
		// The JSON marshaler will use the non-nested field before the nested one.
		type oldLibcontainerConfig struct {
			preAPIConfig
			OldNamespaces map[string]bool `json:"namespaces,omitempty"`
		}
		var oldConfig oldLibcontainerConfig
		err2 := json.Unmarshal(out, &oldConfig)
		if err2 != nil {
			// Use original error.
			return nil, err
		}

		// Translate the old pre-API config into the new config.
		config = oldConfig.preAPIConfig
		for ns := range oldConfig.OldNamespaces {
			config.Namespaces = append(config.Namespaces, configs.Namespace{
				Type: configs.NamespaceType(ns),
			})
		}
	}

	// Read the old state file as well.
	state, err := readState(dockerRoot, containerID)
	if err != nil {
		return nil, err
	}

	// Convert preAPIConfig + old state file to Config.
	// This only converts some of the fields, the ones we use.
	// You may need to add fields if the one you're interested in is not available.
	var result configs.Config
	result.Cgroups = new(configs.Cgroup)
	result.Rootfs = config.RootFs
	result.Hostname = config.Hostname
	result.Namespaces = config.Namespaces
	result.Capabilities = config.Capabilities
	for _, net := range config.Networks {
		n := &configs.Network{
			Name:              state.NetworkState.VethChild,
			Bridge:            net.Bridge,
			MacAddress:        net.MacAddress,
			Address:           net.Address,
			Gateway:           net.Gateway,
			IPv6Address:       net.IPv6Address,
			IPv6Gateway:       net.IPv6Gateway,
			HostInterfaceName: state.NetworkState.VethHost,
		}
		result.Networks = append(result.Networks, n)
	}
	result.Routes = config.Routes
	if config.Cgroups != nil {
		result.Cgroups = config.Cgroups
	}

	return &result, nil
}
Example #25
func createDevices(spec *specs.Spec, config *configs.Config) error {
	// add whitelisted devices
	config.Devices = []*configs.Device{
		{
			Type:     'c',
			Path:     "/dev/null",
			Major:    1,
			Minor:    3,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
		{
			Type:     'c',
			Path:     "/dev/random",
			Major:    1,
			Minor:    8,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
		{
			Type:     'c',
			Path:     "/dev/full",
			Major:    1,
			Minor:    7,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
		{
			Type:     'c',
			Path:     "/dev/tty",
			Major:    5,
			Minor:    0,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
		{
			Type:     'c',
			Path:     "/dev/zero",
			Major:    1,
			Minor:    5,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
		{
			Type:     'c',
			Path:     "/dev/urandom",
			Major:    1,
			Minor:    9,
			FileMode: 0666,
			Uid:      0,
			Gid:      0,
		},
	}
	// merge in additional devices from the spec
	for _, d := range spec.Linux.Devices {
		var uid, gid uint32
		if d.UID != nil {
			uid = *d.UID
		}
		if d.GID != nil {
			gid = *d.GID
		}
		dt, err := stringToDeviceRune(d.Type)
		if err != nil {
			return err
		}
		device := &configs.Device{
			Type:     dt,
			Path:     d.Path,
			Major:    d.Major,
			Minor:    d.Minor,
			FileMode: *d.FileMode,
			Uid:      uid,
			Gid:      gid,
		}
		config.Devices = append(config.Devices, device)
	}
	return nil
}
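Example #25 likewise calls a stringToDeviceRune helper that is not shown. A hypothetical sketch of that conversion, mapping the spec's one-letter device type strings to the rune stored in configs.Device.Type:

// Hypothetical sketch of the helper used in Example #25.
func stringToDeviceRune(s string) (rune, error) {
	switch s {
	case "c", "b", "u", "p": // char, block, unbuffered, fifo
		return rune(s[0]), nil
	default:
		return 0, fmt.Errorf("unsupported device type %q", s)
	}
}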
Example #26
func (d *Driver) setupLabels(container *configs.Config, c *execdriver.Command) {
	container.ProcessLabel = c.ProcessLabel
	container.MountLabel = c.MountLabel
}
Example #27
func runContainer(name string,
	args []string,
	wd string,
	stdin io.Reader,
	stdout io.Writer,
	stderr io.Writer) error {
	var err error
	var id string

	id = path.Base(wd)

	// mount base rootfs with working directory
	rootfs := master_config.Rootfs
	lowerdir := rootfs
	upperdir, err := filepath.Abs(wd)
	if err != nil {
		return err
	}
	workdir, err := filepath.Abs(fmt.Sprintf("%s-%s", wd, "work"))
	if err != nil {
		return err
	}

	err = os.Mkdir(workdir, 0775)
	if err != nil && !os.IsExist(err) {
		return err
	}
	defer os.RemoveAll(workdir)
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
		lowerdir, upperdir, workdir)
	err = syscall.Mount("overlay", upperdir, "overlay", syscall.MS_MGC_VAL,
		opts)
	if err != nil {
		return err
	}
	defer func() {
		err := syscall.Unmount(upperdir, 0)
		if err != nil {
			return
		}
	}()

	// set cgroup path
	var config configs.Config
	config = *master_config
	config.Cgroups.Path = fmt.Sprintf("%s/%s",
		config.Cgroups.Path, id)
	config.Rootfs = upperdir
	container, err := factory.Create(id, &config)
	if err != nil {
		return err
	}
	defer container.Destroy()

	args = append([]string{name}, args...)
	process := &libcontainer.Process{
		Args:   args,
		Env:    []string{"PATH=/bin:/sbin:/usr/bin:/usr/sbin"},
		User:   "******",
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
	}

	err = container.Run(process)
	if err != nil {
		return err
	}

	_, err = process.Wait()
	if err != nil {
		return err
	}

	return nil
}