Example #1
File: exec.go Project: nixuw/docker
// Exec implements the exec driver Driver interface; it calls
// libcontainer APIs to execute a process inside a running container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
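
The chOOM trick above relies on a basic property of Go channels: a receive from a closed channel never blocks and immediately yields the zero value. A minimal, self-contained sketch of that idiom:

package main

import "fmt"

func main() {
	// mirror the hook pattern: create the channel and close it up front
	chOOM := make(chan struct{})
	close(chOOM)

	// this receive returns immediately; ok == false signals "closed and
	// empty", which the Start hook reads as "no OOM event"
	_, ok := <-chOOM
	fmt.Println("receive returned, ok =", ok) // receive returned, ok = false
}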
Example #2
func dropList(drops []string) ([]string, error) {
	if stringutils.InSlice(drops, "all") {
		var newCaps []string
		for _, capName := range execdriver.GetAllCapabilities() {
			cap := execdriver.GetCapability(capName)
			logrus.Debugf("drop cap %s\n", cap.Key)
			numCap := fmt.Sprintf("%d", cap.Value)
			newCaps = append(newCaps, numCap)
		}
		return newCaps, nil
	}
	return []string{}, nil
}
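
Here stringutils.InSlice checks for the literal "all", and each capability name is expanded into its numeric kernel constant. A standalone sketch of the same expansion, using a small hypothetical capability table in place of the execdriver helpers:

package main

import "fmt"

// capability pairs a symbolic name with its kernel constant; the real
// execdriver type is richer, this table is only an illustration.
type capability struct {
	Key   string
	Value int
}

var allCapabilities = []capability{
	{"CHOWN", 0},
	{"DAC_OVERRIDE", 1},
	{"KILL", 5},
}

func dropList(drops []string) ([]string, error) {
	for _, d := range drops {
		if d != "all" {
			continue
		}
		// "all" expands to the numeric value of every known capability
		var newCaps []string
		for _, c := range allCapabilities {
			newCaps = append(newCaps, fmt.Sprintf("%d", c.Value))
		}
		return newCaps, nil
	}
	return []string{}, nil
}

func main() {
	caps, _ := dropList([]string{"all"})
	fmt.Println(caps) // [0 1 5]
}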
Example #3
func (d *Driver) setPrivileged(container *configs.Config) (err error) {
	container.Capabilities = execdriver.GetAllCapabilities()
	container.Cgroups.AllowAllDevices = true

	hostDevices, err := devices.HostDevices()
	if err != nil {
		return err
	}
	container.Devices = hostDevices

	if apparmor.IsEnabled() {
		container.AppArmorProfile = "unconfined"
	}
	return nil
}
Example #4
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}

	config := active.Config()
	if err := setupPipes(&config, processConfig, p, pipes); err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if startCallback != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		startCallback(&c.ProcessConfig, pid)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
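
The closing lines are the standard Go pattern for recovering a child's exit code: a non-zero exit surfaces as *exec.ExitError, whose ProcessState exposes a syscall.WaitStatus on Unix (utils.ExitStatus presumably unwraps it much like ExitStatus below). A minimal sketch:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	// run a command that deliberately exits with status 3
	err := exec.Command("sh", "-c", "exit 3").Run()
	if exitErr, ok := err.(*exec.ExitError); ok {
		// on Unix, Sys() is a syscall.WaitStatus carrying the raw status
		ws := exitErr.ProcessState.Sys().(syscall.WaitStatus)
		fmt.Println("exit status:", ws.ExitStatus()) // exit status: 3
	}
}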
Example #5
// Config takes ContainerJSON and Daemon Info and converts it into the opencontainers spec.
func Config(c types.ContainerJSON, info types.Info, capabilities []string) (config *specs.LinuxSpec, err error) {
	config = &specs.LinuxSpec{
		Spec: specs.Spec{
			Version: SpecVersion,
			Platform: specs.Platform{
				OS:   info.OSType,
				Arch: info.Architecture,
			},
			Process: specs.Process{
				Terminal: c.Config.Tty,
				User:     specs.User{
				// TODO: user stuffs
				},
				Args: append([]string{c.Path}, c.Args...),
				Env:  c.Config.Env,
				Cwd:  c.Config.WorkingDir,
			},
			Root: specs.Root{
				Path:     "rootfs",
				Readonly: c.HostConfig.ReadonlyRootfs,
			},
			Mounts: []specs.MountPoint{},
		},
	}

	// make sure the current working directory is not blank
	if config.Process.Cwd == "" {
		config.Process.Cwd = DefaultCurrentWorkingDirectory
	}

	// get the user
	if c.Config.User != "" {
		u, err := user.LookupUser(c.Config.User)
		if err == nil {
			config.Spec.Process.User = specs.User{
				UID: uint32(u.Uid),
				GID: uint32(u.Gid),
			}
		} else {
			//return nil, fmt.Errorf("Looking up user (%s) failed: %v", c.Config.User, err)
			logrus.Warnf("Looking up user (%s) failed: %v", c.Config.User, err)
		}
	}
	// add the additional groups
	for _, group := range c.HostConfig.GroupAdd {
		g, err := user.LookupGroup(group)
		if err != nil {
			return nil, fmt.Errorf("Looking up group (%s) failed: %v", group, err)
		}
		config.Spec.Process.User.AdditionalGids = append(config.Spec.Process.User.AdditionalGids, uint32(g.Gid))
	}

	// get the hostname; if the hostname is the same as the first 12
	// characters of the ID, then set the hostname to the container name
	if c.ID[:12] == c.Config.Hostname {
		config.Hostname = strings.TrimPrefix(c.Name, "/")
	}

	// get mounts
	mounts := map[string]bool{}
	for _, mount := range c.Mounts {
		mounts[mount.Destination] = true
		config.Mounts = append(config.Mounts, specs.MountPoint{
			Name: mount.Destination,
			Path: mount.Destination,
		})
	}

	// add /etc/hosts and /etc/resolv.conf if we should have networking
	if c.HostConfig.NetworkMode != "none" && c.HostConfig.NetworkMode != "host" {
		DefaultMounts = append(DefaultMounts, NetworkMounts...)
	}
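	// NOTE: the append above mutates the package-level DefaultMounts slice,
	// so repeated calls with networking enabled keep growing it; copying
	// into a local slice first would avoid that.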

	// if we aren't doing something crazy like mounting a default mount
	// ourselves, then we can mount it the default way
	for _, mount := range DefaultMounts {
		if _, ok := mounts[mount.Path]; !ok {
			config.Mounts = append(config.Mounts, mount)
		}
	}

	// set privileged
	if c.HostConfig.Privileged {
		// allow all caps
		capabilities = execdriver.GetAllCapabilities()
	}

	// get the capabilities
	config.Linux.Capabilities, err = execdriver.TweakCapabilities(capabilities, c.HostConfig.CapAdd.Slice(), c.HostConfig.CapDrop.Slice())
	if err != nil {
		return nil, fmt.Errorf("setting capabilities failed: %v", err)
	}

	// add CAP_ prefix
	// TODO: this is awful
	for i, cap := range config.Linux.Capabilities {
		if !strings.HasPrefix(cap, "CAP_") {
			config.Linux.Capabilities[i] = fmt.Sprintf("CAP_%s", cap)
		}
	}

	// if we have a container that needs a terminal but no env vars, then set
	// default env vars for the terminal to function
	if config.Spec.Process.Terminal && len(config.Spec.Process.Env) <= 0 {
		config.Spec.Process.Env = DefaultTerminalEnv
	}
	if config.Spec.Process.Terminal {
		// make sure we have TERM set
		var termSet bool
		for _, env := range config.Spec.Process.Env {
			if strings.HasPrefix(env, "TERM=") {
				termSet = true
				break
			}
		}
		if !termSet {
			// set the term variable
			config.Spec.Process.Env = append(config.Spec.Process.Env, fmt.Sprintf("TERM=%s", DefaultTerminal))
		}
	}

	return config, nil
}
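
The TERM handling at the end is a small reusable pattern: scan the environment for an existing TERM= entry and only append a default when none is present. A self-contained sketch (defaultTerminal stands in for whatever the package's DefaultTerminal is set to):

package main

import (
	"fmt"
	"strings"
)

const defaultTerminal = "xterm" // stand-in for the package's DefaultTerminal

// ensureTerm appends a TERM entry only if the environment lacks one.
func ensureTerm(env []string) []string {
	for _, e := range env {
		if strings.HasPrefix(e, "TERM=") {
			return env // already set, leave it alone
		}
	}
	return append(env, fmt.Sprintf("TERM=%s", defaultTerminal))
}

func main() {
	fmt.Println(ensureTerm([]string{"PATH=/usr/bin"}))
	// [PATH=/usr/bin TERM=xterm]
	fmt.Println(ensureTerm([]string{"TERM=screen"}))
	// [TERM=screen]
}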
Example #6
func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	var term execdriver.Terminal

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: processConfig.User,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}

	if processConfig.Tty {
		config := active.Config()
		rootuid, err := config.HostUID()
		if err != nil {
			return -1, err
		}
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return -1, err
		}
		term, err = NewTtyConsole(cons, pipes, rootuid)
		if err != nil {
			// the := declarations above shadow any outer err, so this
			// failure must be checked here, inside the branch
			return -1, err
		}
	} else {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr
		p.Stdin = pipes.Stdin
		term = &execdriver.StdConsole{}
	}

	processConfig.Terminal = term

	if err := active.Start(p); err != nil {
		return -1, err
	}

	if startCallback != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}
		startCallback(&c.ProcessConfig, pid)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
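
The error check inside the Tty branch above matters because := in an inner block declares fresh variables: assignments to the shadowed err never reach the outer one. A tiny sketch of the pitfall:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var err error
	if true {
		// := declares a new err scoped to this block, shadowing the outer one
		n, err := strconv.Atoi("not a number")
		_, _ = n, err
	}
	// the outer err never saw the failure above
	fmt.Println(err) // <nil>
}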
Example #7
// Exec implements the exec driver Driver interface; it calls
// libcontainer APIs to execute a process inside a running container.
func (d *Driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, hooks execdriver.Hooks) (int, error) {
	active := d.activeContainers[c.ID]
	if active == nil {
		return -1, fmt.Errorf("No active container exists with ID %s", c.ID)
	}

	user := processConfig.User
	if c.RemappedRoot.UID != 0 && user == "" {
		//if user namespaces are enabled, set user explicitly so uid/gid is set to 0
		//otherwise we end up with the overflow id and no permissions (65534)
		user = "0:0"
	}

	p := &libcontainer.Process{
		Args: append([]string{processConfig.Entrypoint}, processConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: user,
	}

	if processConfig.Privileged {
		p.Capabilities = execdriver.GetAllCapabilities()
	}
	// add CAP_ prefix to all caps for new libcontainer update to match
	// the spec format.
	for i, s := range p.Capabilities {
		if !strings.HasPrefix(s, "CAP_") {
			p.Capabilities[i] = fmt.Sprintf("CAP_%s", s)
		}
	}

	config := active.Config()
	wg := sync.WaitGroup{}
	writers, err := setupPipes(&config, processConfig, p, pipes, &wg)
	if err != nil {
		return -1, err
	}

	if err := active.Start(p); err != nil {
		return -1, err
	}
	//close the write end of any opened pipes now that they are dup'ed into the container
	for _, writer := range writers {
		writer.Close()
	}

	if hooks.Start != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return -1, err
		}

		// A closed channel for OOM is returned here as it will be
		// non-blocking and return the correct result when read.
		chOOM := make(chan struct{})
		close(chOOM)
		hooks.Start(&c.ProcessConfig, pid, chOOM)
	}

	ps, err := p.Wait()
	if err != nil {
		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			return -1, err
		}
		ps = exitErr.ProcessState
	}
	// wait for all IO goroutine copiers to finish
	wg.Wait()
	return utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), nil
}
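
Two details distinguish this revision: the parent closes its copies of the pipe write ends once they are dup'ed into the container, and a sync.WaitGroup holds the function open until the IO copier goroutines drain. A minimal sketch of why both matter, with plain os/exec standing in for libcontainer:

package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"
	"sync"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	cmd := exec.Command("echo", "hello")
	cmd.Stdout = w
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// the child now holds its own copy of the write end; close ours,
	// otherwise the reader below never sees EOF
	w.Close()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		io.Copy(os.Stdout, r) // returns once every write end is closed
	}()

	cmd.Wait()
	wg.Wait() // wait for the copier to drain before returning
	fmt.Println("copier finished")
}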
Example #8
// Config takes ContainerJSON and converts it into the opencontainers spec.
func Config(c types.ContainerJSON, osType, architecture string, capabilities []string, idroot, idlen uint32) (config *specs.Spec, err error) {
	// for user namespaces use defaults unless another range specified
	if idroot == 0 {
		idroot = DefaultUserNSHostID
	}
	if idlen == 0 {
		idlen = DefaultUserNSMapSize
	}
	config = &specs.Spec{
		Version: SpecVersion,
		Platform: specs.Platform{
			OS:   osType,
			Arch: architecture,
		},
		Process: specs.Process{
			Terminal: c.Config.Tty,
			User:     specs.User{
			// TODO: user stuffs
			},
			Args: append([]string{c.Path}, c.Args...),
			Env:  c.Config.Env,
			Cwd:  c.Config.WorkingDir,
			// TODO: add parsing of Ulimits
			Rlimits: []specs.Rlimit{
				{
					Type: "RLIMIT_NOFILE",
					Hard: uint64(1024),
					Soft: uint64(1024),
				},
			},
			NoNewPrivileges: true,
			ApparmorProfile: c.AppArmorProfile,
		},
		Root: specs.Root{
			Path:     "rootfs",
			Readonly: c.HostConfig.ReadonlyRootfs,
		},
		Mounts: []specs.Mount{},
		Linux: specs.Linux{
			Namespaces: []specs.Namespace{
				{
					Type: "ipc",
				},
				{
					Type: "uts",
				},
				{
					Type: "mount",
				},
			},
			UIDMappings: []specs.IDMapping{
				{
					ContainerID: 0,
					HostID:      idroot,
					Size:        idlen,
				},
			},
			GIDMappings: []specs.IDMapping{
				{
					ContainerID: 0,
					HostID:      idroot,
					Size:        idlen,
				},
			},
			Resources: &specs.Resources{
				Devices: []specs.DeviceCgroup{
					{
						Allow:  false,
						Access: sPtr("rwm"),
					},
				},
				DisableOOMKiller: c.HostConfig.Resources.OomKillDisable,
				OOMScoreAdj:      &c.HostConfig.OomScoreAdj,
				Memory: &specs.Memory{
					Limit:       uint64ptr(c.HostConfig.Resources.Memory),
					Reservation: uint64ptr(c.HostConfig.Resources.MemoryReservation),
					Swap:        uint64ptr(c.HostConfig.Resources.MemorySwap),
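					// NOTE: MemorySwappiness is a *int64 in the Docker API and
					// may be nil; the unconditional dereference below panics
					// when it is unset (a guard is sketched after this example)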
					Swappiness:  uint64ptr(*c.HostConfig.Resources.MemorySwappiness),
					Kernel:      uint64ptr(c.HostConfig.Resources.KernelMemory),
				},
				CPU: &specs.CPU{
					Shares: uint64ptr(c.HostConfig.Resources.CPUShares),
					Quota:  uint64ptr(c.HostConfig.Resources.CPUQuota),
					Period: uint64ptr(c.HostConfig.Resources.CPUPeriod),
					Cpus:   &c.HostConfig.Resources.CpusetCpus,
					Mems:   &c.HostConfig.Resources.CpusetMems,
				},
				Pids: &specs.Pids{
					Limit: &c.HostConfig.Resources.PidsLimit,
				},
				BlockIO: &specs.BlockIO{
					Weight: &c.HostConfig.Resources.BlkioWeight,
					// TODO: add parsing for Throttle/Weight Devices
				},
			},
			RootfsPropagation: "",
		},
	}

	// make sure the current working directory is not blank
	if config.Process.Cwd == "" {
		config.Process.Cwd = DefaultCurrentWorkingDirectory
	}

	// get the user
	if c.Config.User != "" {
		u, err := user.LookupUser(c.Config.User)
		if err == nil {
			config.Process.User = specs.User{
				UID: uint32(u.Uid),
				GID: uint32(u.Gid),
			}
		} else {
			//return nil, fmt.Errorf("Looking up user (%s) failed: %v", c.Config.User, err)
			logrus.Warnf("Looking up user (%s) failed: %v", c.Config.User, err)
		}
	}
	// add the additional groups
	for _, group := range c.HostConfig.GroupAdd {
		g, err := user.LookupGroup(group)
		if err != nil {
			return nil, fmt.Errorf("Looking up group (%s) failed: %v", group, err)
		}
		config.Process.User.AdditionalGids = append(config.Process.User.AdditionalGids, uint32(g.Gid))
	}

	// get the hostname; if the hostname is the same as the first 12
	// characters of the ID, then set the hostname to the container name
	if c.ID[:12] == c.Config.Hostname {
		config.Hostname = strings.TrimPrefix(c.Name, "/")
	}

	// set privileged
	if c.HostConfig.Privileged {
		// allow all caps
		capabilities = execdriver.GetAllCapabilities()
	}

	// get the capabilities
	config.Process.Capabilities, err = execdriver.TweakCapabilities(capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop)
	if err != nil {
		return nil, fmt.Errorf("setting capabilities failed: %v", err)
	}

	// add CAP_ prefix
	// TODO: this is awful
	for i, cap := range config.Process.Capabilities {
		if !strings.HasPrefix(cap, "CAP_") {
			config.Process.Capabilities[i] = fmt.Sprintf("CAP_%s", cap)
		}
	}

	// if we have a container that needs a terminal but no env vars, then set
	// default env vars for the terminal to function
	if config.Process.Terminal && len(config.Process.Env) <= 0 {
		config.Process.Env = DefaultTerminalEnv
	}
	if config.Process.Terminal {
		// make sure we have TERM set
		var termSet bool
		for _, env := range config.Process.Env {
			if strings.HasPrefix(env, "TERM=") {
				termSet = true
				break
			}
		}
		if !termSet {
			// set the term variable
			config.Process.Env = append(config.Process.Env, fmt.Sprintf("TERM=%s", DefaultTerminal))
		}
	}

	// check namespaces
	if !c.HostConfig.NetworkMode.IsHost() {
		config.Linux.Namespaces = append(config.Linux.Namespaces, specs.Namespace{
			Type: "network",
		})
	}
	if !c.HostConfig.PidMode.IsHost() {
		config.Linux.Namespaces = append(config.Linux.Namespaces, specs.Namespace{
			Type: "pid",
		})
	}
	if c.HostConfig.UsernsMode.Valid() && !c.HostConfig.NetworkMode.IsHost() && !c.HostConfig.PidMode.IsHost() && !c.HostConfig.Privileged {
		config.Linux.Namespaces = append(config.Linux.Namespaces, specs.Namespace{
			Type: "user",
		})
	} else {
		// reset uid and gid mappings
		config.Linux.UIDMappings = []specs.IDMapping{}
		config.Linux.GIDMappings = []specs.IDMapping{}
	}

	// get mounts
	mounts := map[string]bool{}
	for _, mount := range c.Mounts {
		mounts[mount.Destination] = true
		var opt []string
		if mount.RW {
			opt = append(opt, "rw")
		}
		if mount.Mode != "" {
			opt = append(opt, mount.Mode)
		}
		opt = append(opt, []string{"rbind", "rprivate"}...)

		config.Mounts = append(config.Mounts, specs.Mount{
			Destination: mount.Destination,
			Type:        "bind",
			Source:      mount.Source,
			Options:     opt,
		})
	}

	// add /etc/hosts and /etc/resolv.conf if we should have networking
	if c.HostConfig.NetworkMode != "none" && c.HostConfig.NetworkMode != "host" {
		DefaultMounts = append(DefaultMounts, NetworkMounts...)
	}
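	// NOTE: as in the earlier example, this append mutates the package-level
	// DefaultMounts slice across calls; a local copy would avoid the
	// accumulation.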

	// if we aren't doing something crazy like mounting a default mount
	// ourselves, then we can mount it the default way
	for _, mount := range DefaultMounts {
		if _, ok := mounts[mount.Destination]; !ok {
			config.Mounts = append(config.Mounts, mount)
		}
	}

	// fix default mounts for cgroups and devpts without user namespaces
	// see: https://github.com/opencontainers/runc/issues/225#issuecomment-136519577
	if len(config.Linux.UIDMappings) == 0 {
		for k, mount := range config.Mounts {
			switch mount.Destination {
			case "/sys/fs/cgroup":
				config.Mounts[k].Options = append(config.Mounts[k].Options, "ro")
			case "/dev/pts":
				config.Mounts[k].Options = append(config.Mounts[k].Options, "gid=5")
			}
		}
	}

	// parse additional groups and add them to gid mappings
	if err := parseMappings(config, c.HostConfig); err != nil {
		return nil, err
	}

	// parse devices
	if err := parseDevices(config, c.HostConfig); err != nil {
		return nil, err
	}

	// parse security opt
	if err := parseSecurityOpt(config, c.HostConfig); err != nil {
		return nil, err
	}

	// set privileged
	if c.HostConfig.Privileged {
		if !c.HostConfig.ReadonlyRootfs {
			// clear readonly for cgroup
			//	config.Mounts["cgroup"] = DefaultMountpoints["cgroup"]
		}
	}

	return config, nil
}
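
As flagged in the NOTE above, the Swappiness field is built by dereferencing a possibly-nil *int64. A self-contained sketch of a guard, with uint64ptr mirroring the small helper this example assumes:

package main

import "fmt"

// uint64ptr mirrors the helper assumed by the example above.
func uint64ptr(i int64) *uint64 {
	n := uint64(i)
	return &n
}

// swappiness converts the Docker API's *int64 into the spec's *uint64,
// leaving it unset instead of panicking when the input is nil.
func swappiness(memorySwappiness *int64) *uint64 {
	if memorySwappiness == nil {
		return nil
	}
	return uint64ptr(*memorySwappiness)
}

func main() {
	fmt.Println(swappiness(nil)) // <nil>
	s := int64(60)
	fmt.Println(*swappiness(&s)) // 60
}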