Example no. 1
func (container *Container) createVolumes() error {
	binds, err := container.getBindMap()
	if err != nil {
		return err
	}
	volumesDriver := container.runtime.volumes.driver
	// Create the requested volumes if they don't exist
	for volPath := range container.Config.Volumes {
		volPath = path.Clean(volPath)
		volIsDir := true
		// Skip existing volumes
		if _, exists := container.Volumes[volPath]; exists {
			continue
		}
		var srcPath string
		var isBindMount bool
		srcRW := false
		// If an external bind is defined for this volume, use that as a source
		if bindMap, exists := binds[volPath]; exists {
			isBindMount = true
			srcPath = bindMap.SrcPath
			if strings.ToLower(bindMap.Mode) == "rw" {
				srcRW = true
			}
			if stat, err := os.Stat(bindMap.SrcPath); err != nil {
				return err
			} else {
				volIsDir = stat.IsDir()
			}
			// Otherwise, create a directory in $ROOT/volumes/ and use that
		} else {

			// Do not pass a container as the parameter for the volume creation.
			// The graph driver would use the container's information (Image) to
			// create the parent.
			c, err := container.runtime.volumes.Create(nil, nil, "", "", nil)
			if err != nil {
				return err
			}
			srcPath, err = volumesDriver.Get(c.ID)
			if err != nil {
				return fmt.Errorf("Driver %s failed to get volume rootfs %s: %s", volumesDriver, c.ID, err)
			}
			srcRW = true // RW by default
		}

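		// Resolve any symlinks so the recorded source path points at the real location on disk.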
		if p, err := filepath.EvalSymlinks(srcPath); err != nil {
			return err
		} else {
			srcPath = p
		}

		container.Volumes[volPath] = srcPath
		container.VolumesRW[volPath] = srcRW

		// Create the mountpoint
		volPath = path.Join(container.basefs, volPath)
		rootVolPath, err := utils.FollowSymlinkInScope(volPath, container.basefs)
		if err != nil {
			return err
		}

		if _, err := os.Stat(rootVolPath); err != nil {
			if os.IsNotExist(err) {
				if volIsDir {
					if err := os.MkdirAll(rootVolPath, 0755); err != nil {
						return err
					}
				} else {
					if err := os.MkdirAll(path.Dir(rootVolPath), 0755); err != nil {
						return err
					}
					if f, err := os.OpenFile(rootVolPath, os.O_CREATE, 0755); err != nil {
						return err
					} else {
						f.Close()
					}
				}
			}
		}

		// Do not copy or change permissions if we are mounting from the host
		if srcRW && !isBindMount {
			volList, err := ioutil.ReadDir(rootVolPath)
			if err != nil {
				return err
			}
			if len(volList) > 0 {
				srcList, err := ioutil.ReadDir(srcPath)
				if err != nil {
					return err
				}
				if len(srcList) == 0 {
					// If the source volume is empty, copy files from the container's rootfs into the volume
					if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
						return err
					}

					var stat syscall.Stat_t
					if err := syscall.Stat(rootVolPath, &stat); err != nil {
						return err
					}
					var srcStat syscall.Stat_t
					if err := syscall.Stat(srcPath, &srcStat); err != nil {
						return err
					}
					// Change the source volume's ownership if it differs from the root
					// files that were just copied
					if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
						if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
							return err
						}
					}
				}
			}
		}
	}
	return nil
}
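
A minimal, self-contained sketch of the copy-and-chown step at the end of Example no. 1: seed an empty volume directory with the contents of the corresponding path in the container's rootfs, then make the volume's ownership match what was copied in. The seedVolume helper below is invented for illustration and uses a plain recursive copy instead of Docker's archive.CopyWithTar, so it has no Docker dependencies; it assumes Linux.

package sketch

import (
	"io"
	"os"
	"path/filepath"
	"syscall"
)

// seedVolume copies the contents of contentDir into volDir if volDir is empty,
// then chowns volDir to match contentDir's owner. Hypothetical sketch of the
// technique used at the end of createVolumes; Linux-only.
func seedVolume(contentDir, volDir string) error {
	entries, err := os.ReadDir(volDir)
	if err != nil {
		return err
	}
	if len(entries) > 0 {
		// The volume already has data; leave it untouched.
		return nil
	}
	// Recursively copy contentDir into volDir, preserving file modes.
	err = filepath.Walk(contentDir, func(p string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		rel, err := filepath.Rel(contentDir, p)
		if err != nil {
			return err
		}
		dst := filepath.Join(volDir, rel)
		if info.IsDir() {
			return os.MkdirAll(dst, info.Mode())
		}
		src, err := os.Open(p)
		if err != nil {
			return err
		}
		defer src.Close()
		out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, src)
		return err
	})
	if err != nil {
		return err
	}
	// Make the volume's ownership match the content that was copied into it.
	var srcStat syscall.Stat_t
	if err := syscall.Stat(contentDir, &srcStat); err != nil {
		return err
	}
	return os.Chown(volDir, int(srcStat.Uid), int(srcStat.Gid))
}
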
Example no. 2
func (container *Container) Start() (err error) {
	container.Lock()
	defer container.Unlock()

	if container.State.IsRunning() {
		return fmt.Errorf("The container %s is already running.", container.ID)
	}

	defer func() {
		if err != nil {
			container.cleanup()
		}
	}()

	if err := container.Mount(); err != nil {
		return err
	}

	if container.runtime.config.DisableNetwork {
		container.Config.NetworkDisabled = true
		container.buildHostnameAndHostsFiles("127.0.1.1")
	} else {
		if err := container.allocateNetwork(); err != nil {
			return err
		}
		container.buildHostnameAndHostsFiles(container.NetworkSettings.IPAddress)
	}

	// Make sure the config is compatible with the current kernel
	if container.Config.Memory > 0 && !container.runtime.sysInfo.MemoryLimit {
		log.Printf("WARNING: Your kernel does not support memory limit capabilities. Limitation discarded.\n")
		container.Config.Memory = 0
	}
	if container.Config.Memory > 0 && !container.runtime.sysInfo.SwapLimit {
		log.Printf("WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
		container.Config.MemorySwap = -1
	}

	if container.runtime.sysInfo.IPv4ForwardingDisabled {
		log.Printf("WARNING: IPv4 forwarding is disabled. Networking will not work")
	}

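	// Initialize the volume maps if nothing has been recorded for this container yet.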
	if len(container.Volumes) == 0 {
		container.Volumes = make(map[string]string)
		container.VolumesRW = make(map[string]bool)
	}

	// Apply volumes from another container if requested
	if err := container.applyExternalVolumes(); err != nil {
		return err
	}

	if err := container.createVolumes(); err != nil {
		return err
	}

	// Setup environment
	env := []string{
		"HOME=/",
		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
		"HOSTNAME=" + container.Config.Hostname,
	}

	if container.Config.Tty {
		env = append(env, "TERM=xterm")
	}

	// Init any links between the parent and children
	runtime := container.runtime

	children, err := runtime.Children(container.Name)
	if err != nil {
		return err
	}

	if len(children) > 0 {
		container.activeLinks = make(map[string]*Link, len(children))

		// If we encounter an error, make sure that we roll back any network
		// config and iptables changes
		rollback := func() {
			for _, link := range container.activeLinks {
				link.Disable()
			}
			container.activeLinks = nil
		}

		for p, child := range children {
			link, err := NewLink(container, child, p, runtime.eng)
			if err != nil {
				rollback()
				return err
			}

			container.activeLinks[link.Alias()] = link
			if err := link.Enable(); err != nil {
				rollback()
				return err
			}

			for _, envVar := range link.ToEnv() {
				env = append(env, envVar)
			}
		}
	}

	for _, elem := range container.Config.Env {
		env = append(env, elem)
	}

	if err := container.generateEnvConfig(env); err != nil {
		return err
	}

	if container.Config.WorkingDir != "" {
		container.Config.WorkingDir = path.Clean(container.Config.WorkingDir)
		if err := os.MkdirAll(path.Join(container.basefs, container.Config.WorkingDir), 0755); err != nil {
			return err
		}
	}

	envPath, err := container.EnvConfigPath()
	if err != nil {
		return err
	}

	// Setup the root fs as a bind mount of the base fs
	root := container.RootfsPath()
	if err := os.MkdirAll(root, 0755); err != nil && !os.IsExist(err) {
		return err
	}

	// Create a bind mount of the base fs as a place where we can add mounts
	// without affecting the ability to access the base fs
	if err := mount.Mount(container.basefs, root, "none", "bind,rw"); err != nil {
		return err
	}

	// Make sure the root fs is private so the mounts here don't propagate to basefs
	if err := mount.ForceMount(root, root, "none", "private"); err != nil {
		return err
	}

	// Mount docker specific files into the containers root fs
	if err := mount.Mount(runtime.sysInitPath, path.Join(root, "/.dockerinit"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(envPath, path.Join(root, "/.dockerenv"), "none", "bind,ro"); err != nil {
		return err
	}
	if err := mount.Mount(container.ResolvConfPath, path.Join(root, "/etc/resolv.conf"), "none", "bind,ro"); err != nil {
		return err
	}

	if container.HostnamePath != "" && container.HostsPath != "" {
		if err := mount.Mount(container.HostnamePath, path.Join(root, "/etc/hostname"), "none", "bind,ro"); err != nil {
			return err
		}
		if err := mount.Mount(container.HostsPath, path.Join(root, "/etc/hosts"), "none", "bind,ro"); err != nil {
			return err
		}
	}

	// Mount user specified volumes
	for r, v := range container.Volumes {
		mountAs := "ro"
		if container.VolumesRW[r] {
			mountAs = "rw"
		}

		r = path.Join(root, r)
		if p, err := utils.FollowSymlinkInScope(r, root); err != nil {
			return err
		} else {
			r = p
		}

		if err := mount.Mount(v, r, "none", fmt.Sprintf("bind,%s", mountAs)); err != nil {
			return err
		}
	}

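	// Translate the container configuration into the exec driver command that will be run.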
	populateCommand(container)

	// Setup logging of stdout and stderr to disk
	if err := container.runtime.LogToDisk(container.stdout, container.logPath("json"), "stdout"); err != nil {
		return err
	}
	if err := container.runtime.LogToDisk(container.stderr, container.logPath("json"), "stderr"); err != nil {
		return err
	}
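	// waitLock lets callers wait for the container process to exit.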
	container.waitLock = make(chan struct{})

	// Set up pipes and/or a pty
	var setup func() error
	if container.Config.Tty {
		setup = container.setupPty
	} else {
		setup = container.setupStd
	}
	if err := setup(); err != nil {
		return err
	}

	callbackLock := make(chan struct{})
	callback := func(command *execdriver.Command) {
		container.State.SetRunning(command.Pid())
		if command.Tty {
			// The callback is called after the process Start()
			// so we are in the parent process. In TTY mode, stdin/stdout/stderr is
			// the pty slave, which we close here.
			if c, ok := command.Stdout.(io.Closer); ok {
				c.Close()
			}
		}
		if err := container.ToDisk(); err != nil {
			utils.Debugf("%s", err)
		}
		close(callbackLock)
	}

	// We use a callback here instead of a goroutine and a chan for
	// synchronization purposes
	cErr := utils.Go(func() error { return container.monitor(callback) })

	// Start should not return until the process is actually running
	select {
	case <-callbackLock:
	case err := <-cErr:
		return err
	}
	return nil
}
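
A minimal sketch of the root filesystem mount sequence in Example no. 2: bind-mount basefs onto a working root, mark the mount private so later mounts do not propagate back to basefs, then bind individual host files (resolv.conf here) into it read-only. The setupRootfs helper below is invented for illustration and uses raw syscall.Mount calls rather than the mount.Mount/mount.ForceMount helpers from the Docker tree; it assumes Linux and root privileges.

package sketch

import (
	"path"
	"syscall"
)

// setupRootfs reproduces the private-bind-mount pattern from Start():
// bind basefs onto root, make the mount private, then bind a host file
// into the container read-only. Hypothetical sketch, Linux-only.
func setupRootfs(basefs, root, resolvConf string) error {
	// Bind-mount the base filesystem onto the working root.
	if err := syscall.Mount(basefs, root, "", syscall.MS_BIND, ""); err != nil {
		return err
	}
	// Mark the mount private so mounts added below stay local to this root.
	if err := syscall.Mount("", root, "", syscall.MS_PRIVATE, ""); err != nil {
		return err
	}
	// A read-only bind mount takes two steps: bind, then remount read-only.
	target := path.Join(root, "/etc/resolv.conf")
	if err := syscall.Mount(resolvConf, target, "", syscall.MS_BIND, ""); err != nil {
		return err
	}
	return syscall.Mount("", target, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, "")
}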