Example #1
func copyFromContainer(ctx context.Context, dockerCli *client.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
	if dstPath != "-" {
		// Get an absolute destination path.
		dstPath, err = resolveLocalPath(dstPath)
		if err != nil {
			return err
		}
	}

	// If the client requests to follow a symlink, we must first resolve the link target to decide which file to copy.
	var rebaseName string
	if cpParam.followLink {
		srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath)

		// If the source is a symbolic link, we should follow it.
		if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
			linkTarget := srcStat.LinkTarget
			if !system.IsAbs(linkTarget) {
				// Join with the parent directory.
				srcParent, _ := archive.SplitPathDirEntry(srcPath)
				linkTarget = filepath.Join(srcParent, linkTarget)
			}

			linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
			srcPath = linkTarget
		}

	}

	content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath)
	if err != nil {
		return err
	}
	defer content.Close()

	if dstPath == "-" {
		// Send the response to STDOUT.
		_, err = io.Copy(os.Stdout, content)

		return err
	}

	// Prepare source copy info.
	srcInfo := archive.CopyInfo{
		Path:       srcPath,
		Exists:     true,
		IsDir:      stat.Mode.IsDir(),
		RebaseName: rebaseName,
	}

	preArchive := content
	if len(srcInfo.RebaseName) != 0 {
		_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
		preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName)
	}
	// See comments in the implementation of `archive.CopyTo` for exactly what
	// goes into deciding how and whether the source archive needs to be
	// altered for the correct copy behavior.
	return archive.CopyTo(preArchive, srcInfo, dstPath)
}
Example #2
// normaliseWorkdir normalises a user-requested working directory in a
// platform semantically consistent way.
func normaliseWorkdir(current string, requested string) (string, error) {
	if requested == "" {
		return "", fmt.Errorf("cannot normalise nothing")
	}

	current = filepath.FromSlash(current)
	requested = filepath.FromSlash(requested)

	// Target semantics is C:\somefolder, specifically in the format:
	// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
	// guaranteed that `current`, if set, is consistent. This allows us to
	// cope correctly with any of the following in a Dockerfile:
	//	WORKDIR a                       --> C:\a
	//	WORKDIR c:\\foo                 --> C:\foo
	//	WORKDIR \\foo                   --> C:\foo
	//	WORKDIR /foo                    --> C:\foo
	//	WORKDIR c:\\foo \ WORKDIR bar   --> C:\foo --> C:\foo\bar
	//	WORKDIR C:/foo \ WORKDIR bar    --> C:\foo --> C:\foo\bar
	//	WORKDIR C:/foo \ WORKDIR \\bar  --> C:\foo --> C:\bar
	//	WORKDIR /foo \ WORKDIR c:/bar   --> C:\foo --> C:\bar
	if len(current) == 0 || system.IsAbs(requested) {
		if (requested[0] == os.PathSeparator) ||
			(len(requested) > 1 && string(requested[1]) != ":") ||
			(len(requested) == 1) {
			requested = filepath.Join(`C:\`, requested)
		}
	} else {
		requested = filepath.Join(current, requested)
	}
	// Upper-case drive letter
	return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
}
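
A minimal table-driven sketch of the WORKDIR mappings listed in the comment above. It assumes a hypothetical dispatchers_windows_test.go sitting in the same package as normaliseWorkdir (package name is an assumption), and it only applies on Windows, where os.PathSeparator is '\' and system.IsAbs accepts both the `\foo` and `c:\foo` forms.

package dockerfile // assumption: the package that contains normaliseWorkdir above

import "testing"

func TestNormaliseWorkdir(t *testing.T) {
	cases := []struct{ current, requested, want string }{
		{``, `a`, `C:\a`},
		{``, `c:\foo`, `C:\foo`},
		{``, `\foo`, `C:\foo`},
		{`C:\foo`, `bar`, `C:\foo\bar`},
		{`C:\foo`, `\bar`, `C:\bar`},
	}
	for _, c := range cases {
		got, err := normaliseWorkdir(c.current, c.requested)
		if err != nil {
			t.Fatalf("normaliseWorkdir(%q, %q) returned error: %v", c.current, c.requested, err)
		}
		if got != c.want {
			t.Errorf("normaliseWorkdir(%q, %q) = %q, want %q", c.current, c.requested, got, c.want)
		}
	}
}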
Example #3
File: daemon.go Project: rzzdy/docker
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) {

	// First perform verification of settings common across all platforms.
	if config != nil {
		if config.WorkingDir != "" {
			config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
			if !system.IsAbs(config.WorkingDir) {
				return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path.", config.WorkingDir)
			}
		}
	}

	if hostConfig == nil {
		return nil, nil
	}

	for port := range hostConfig.PortBindings {
		_, portStr := nat.SplitProtoPort(string(port))
		if _, err := nat.ParsePort(portStr); err != nil {
			return nil, fmt.Errorf("Invalid port specification: %q", portStr)
		}
		for _, pb := range hostConfig.PortBindings[port] {
			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
			if err != nil {
				return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
			}
		}
	}

	// Now do platform-specific verification
	return verifyPlatformContainerSettings(daemon, hostConfig, config)
}
Example #4
// normaliseDest normalises the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normaliseDest(cmdName, workingDir, requested string) (string, error) {
	dest := filepath.FromSlash(requested)
	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))

	// We are guaranteed that the working directory is already consistent.
	// However, Windows also has, for now, the limitation that ADD/COPY can
	// only be done to the system drive, not any drives that might be present
	// as a result of a bind mount.
	//
	// So... if the path requested is Linux-style absolute (/foo or \\foo),
	// we assume it is the system drive. If it is a Windows-style absolute
	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
	// strip any configured working directory's drive letter so that it
	// can be subsequently legitimately converted to a Windows volume-style
	// pathname.

	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
	// we only want to validate where the DriveColon part has been supplied.
	if filepath.IsAbs(dest) {
		if strings.ToUpper(string(dest[0])) != "C" {
			return "", fmt.Errorf("Windows does not support %s with a destination not on the system drive (C:)", cmdName)
		}
		dest = dest[2:] // Strip the drive letter
	}

	// Cannot handle relative where WorkingDir is not the system drive.
	if len(workingDir) > 0 {
		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
		}
		if !system.IsAbs(dest) {
			if string(workingDir[0]) != "C" {
				return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName)
			}
			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
			// Make sure we preserve any trailing slash
			if endsInSlash {
				dest += string(os.PathSeparator)
			}
		}
	}
	return dest, nil
}
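
A small sketch of the rules described in the comments above: off-C: destinations are rejected, a C: drive letter is stripped, and relative paths are joined under the working directory. It assumes a hypothetical dispatchers_windows_test.go in the same package as normaliseDest (package name is an assumption) and a Windows client.

package dockerfile // assumption: the package that contains normaliseDest above

import "testing"

func TestNormaliseDestWindows(t *testing.T) {
	// Destinations off the system drive are rejected outright.
	if _, err := normaliseDest("COPY", ``, `D:\bar`); err == nil {
		t.Error("expected an error for a destination on D:, got nil")
	}

	cases := []struct{ workingDir, requested, want string }{
		{``, `c:\bar`, `\bar`},        // drive letter stripped, volume-style path kept
		{`C:\foo`, `bar`, `\foo\bar`}, // relative path joined under the working dir
	}
	for _, c := range cases {
		got, err := normaliseDest("COPY", c.workingDir, c.requested)
		if err != nil {
			t.Fatalf("normaliseDest(COPY, %q, %q) returned error: %v", c.workingDir, c.requested, err)
		}
		if got != c.want {
			t.Errorf("normaliseDest(COPY, %q, %q) = %q, want %q", c.workingDir, c.requested, got, c.want)
		}
	}
}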
Example #5
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool, validateHostname bool) ([]string, error) {

	// First perform verification of settings common across all platforms.
	if config != nil {
		if config.WorkingDir != "" {
			config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
			if !system.IsAbs(config.WorkingDir) {
				return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir)
			}
		}

		if len(config.StopSignal) > 0 {
			_, err := signal.ParseSignal(config.StopSignal)
			if err != nil {
				return nil, err
			}
		}

		// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
		if validateHostname && len(config.Hostname) > 0 {
			// RFC1123 specifies that 63 bytes is the maximum length
			// Windows has the limitation of 63 bytes in length
			// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
			// We limit the length to 63 bytes here to match RFC1035 and RFC1123.
			matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname)
			if len(config.Hostname) > 63 || !matched {
				return nil, fmt.Errorf("invalid hostname format: %s", config.Hostname)
			}
		}
	}

	if hostConfig == nil {
		return nil, nil
	}

	if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
		return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy")
	}

	for port := range hostConfig.PortBindings {
		_, portStr := nat.SplitProtoPort(string(port))
		if _, err := nat.ParsePort(portStr); err != nil {
			return nil, fmt.Errorf("invalid port specification: %q", portStr)
		}
		for _, pb := range hostConfig.PortBindings[port] {
			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
			if err != nil {
				return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort)
			}
		}
	}

	// Now do platform-specific verification
	return verifyPlatformContainerSettings(daemon, hostConfig, config, update)
}
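
The hostname check above boils down to the RFC 1123 label pattern plus a 63-byte cap. A standalone, stdlib-only sketch of that rule (a hypothetical program, not the daemon's own helper):

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as used in verifyContainerSettings above: dot-separated
// RFC 1123 labels made of alphanumerics and interior hyphens.
var hostnamePattern = regexp.MustCompile(`^(([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])$`)

func validHostname(name string) bool {
	return len(name) <= 63 && hostnamePattern.MatchString(name)
}

func main() {
	for _, h := range []string{"web-1", "my.host.example", "-bad", "also_bad"} {
		fmt.Printf("%-16s %v\n", h, validHostname(h))
	}
}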
Example #6
// normaliseDest normalises the destination of a COPY/ADD command in a
// platform semantically consistent way.
func normaliseDest(cmdName, workingDir, requested string) (string, error) {
	dest := filepath.FromSlash(requested)
	endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
	if !system.IsAbs(requested) {
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
		// Make sure we preserve any trailing slash
		if endsInSlash {
			dest += string(os.PathSeparator)
		}
	}
	return dest, nil
}
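
A small table-driven sketch of this non-Windows variant, assuming a hypothetical dispatchers_unix_test.go in the same package as normaliseDest (package name is an assumption) and a Unix client where os.PathSeparator is '/'.

package dockerfile // assumption: the package that contains normaliseDest above

import "testing"

func TestNormaliseDestUnix(t *testing.T) {
	cases := []struct{ workingDir, requested, want string }{
		{"/app", "sub/", "/app/sub/"},      // relative path joined, trailing slash preserved
		{"/app", "/etc/conf", "/etc/conf"}, // absolute destinations pass through untouched
		{"", "logs", "/logs"},              // empty WORKDIR still yields an absolute path
	}
	for _, c := range cases {
		got, err := normaliseDest("COPY", c.workingDir, c.requested)
		if err != nil {
			t.Fatalf("normaliseDest(COPY, %q, %q) returned error: %v", c.workingDir, c.requested, err)
		}
		if got != c.want {
			t.Errorf("normaliseDest(COPY, %q, %q) = %q, want %q", c.workingDir, c.requested, got, c.want)
		}
	}
}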
Example #7
// normaliseWorkdir normalises a user-requested working directory in a
// platform semantically consistent way.
func normaliseWorkdir(current string, requested string) (string, error) {
	if requested == "" {
		return "", fmt.Errorf("cannot normalise nothing")
	}

	// `filepath.Clean` will replace "" with "." so skip in that case
	if current != "" {
		current = filepath.Clean(current)
	}
	if requested != "" {
		requested = filepath.Clean(requested)
	}

	// If either current or requested in Windows is:
	// C:
	// C:.
	// then an error will be thrown as the definition for the above
	// refers to `current directory on drive C:`
	// Since filepath.Clean() will automatically normalize the above
	// to `C:.`, we only need to check the last format
	if pattern.MatchString(current) {
		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current)
	}
	if pattern.MatchString(requested) {
		return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested)
	}

	// Target semantics is C:\somefolder, specifically in the format:
	// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
	// guaranteed that `current`, if set, is consistent. This allows us to
	// cope correctly with any of the following in a Dockerfile:
	//	WORKDIR a                       --> C:\a
	//	WORKDIR c:\\foo                 --> C:\foo
	//	WORKDIR \\foo                   --> C:\foo
	//	WORKDIR /foo                    --> C:\foo
	//	WORKDIR c:\\foo \ WORKDIR bar   --> C:\foo --> C:\foo\bar
	//	WORKDIR C:/foo \ WORKDIR bar    --> C:\foo --> C:\foo\bar
	//	WORKDIR C:/foo \ WORKDIR \\bar  --> C:\foo --> C:\bar
	//	WORKDIR /foo \ WORKDIR c:/bar   --> C:\foo --> C:\bar
	if len(current) == 0 || system.IsAbs(requested) {
		if (requested[0] == os.PathSeparator) ||
			(len(requested) > 1 && string(requested[1]) != ":") ||
			(len(requested) == 1) {
			requested = filepath.Join(`C:\`, requested)
		}
	} else {
		requested = filepath.Join(current, requested)
	}
	// Upper-case drive letter
	return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
}
Example #8
// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be
// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by
// requiring a LOCALPATH with a `:` to be made explicit with a relative or
// absolute path:
// 	`/path/to/file:name.txt` or `./file:name.txt`
//
// This is apparently how `scp` handles this as well:
// 	http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/
//
// We can't simply check for a filepath separator because container names may
// have a separator, e.g., "host0/cname1" if the container is in a Docker cluster,
// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows
// client, a `:` could be part of an absolute Windows path, in which case it
// is immediately followed by a backslash.
func splitCpArg(arg string) (container, path string) {
	if system.IsAbs(arg) {
		// Explicit local absolute path, e.g., `C:\foo` or `/foo`.
		return "", arg
	}

	parts := strings.SplitN(arg, ":", 2)

	if len(parts) == 1 || strings.HasPrefix(parts[0], ".") {
		// Either there's no `:` in the arg
		// OR it's an explicit local relative path like `./file:name.txt`.
		return "", arg
	}

	return parts[0], parts[1]
}
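
A minimal table-driven sketch of the splitting rules described above, assuming a hypothetical cp_test.go in the same package as splitCpArg (package name is an assumption) and a Linux client, where system.IsAbs behaves like filepath.IsAbs.

package client // assumption: the package that contains splitCpArg above

import "testing"

func TestSplitCpArg(t *testing.T) {
	cases := []struct{ arg, wantContainer, wantPath string }{
		{"mycontainer:/tmp/foo", "mycontainer", "/tmp/foo"},
		{"/path/to/file:name.txt", "", "/path/to/file:name.txt"}, // absolute local path, colon kept
		{"./file:name.txt", "", "./file:name.txt"},               // explicit relative local path
		{"file.txt", "", "file.txt"},                             // no colon at all
	}
	for _, c := range cases {
		container, path := splitCpArg(c.arg)
		if container != c.wantContainer || path != c.wantPath {
			t.Errorf("splitCpArg(%q) = (%q, %q), want (%q, %q)",
				c.arg, container, path, c.wantContainer, c.wantPath)
		}
	}
}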
Example #9
// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
	if len(args) != 1 {
		return errExactlyOneArgument("WORKDIR")
	}

	if err := b.flags.Parse(); err != nil {
		return err
	}

	// This is from the Dockerfile and will not necessarily be in
	// platform-specific semantics, hence ensure it is converted.
	workdir := filepath.FromSlash(args[0])
	current := filepath.FromSlash(b.runConfig.WorkingDir)
	if runtime.GOOS == "windows" {
		// Windows is a little more complicated than Linux. This code ensures
		// we end up with a workdir which is consistent in terms of platform
		// semantics. This means C:\somefolder, specifically in the format:
		// UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already
		// guaranteed that `current`, if set, is consistent. This allows us to
		// cope correctly with any of the following in a Dockerfile:
		//	WORKDIR a                       --> C:\a
		//	WORKDIR c:\\foo                 --> C:\foo
		//	WORKDIR \\foo                   --> C:\foo
		//	WORKDIR /foo                    --> C:\foo
		//	WORKDIR c:\\foo \ WORKDIR bar   --> C:\foo --> C:\foo\bar
		//	WORKDIR C:/foo \ WORKDIR bar    --> C:\foo --> C:\foo\bar
		//	WORKDIR C:/foo \ WORKDIR \\bar  --> C:\foo --> C:\bar
		//	WORKDIR /foo \ WORKDIR c:/bar   --> C:\foo --> C:\bar
		if len(current) == 0 || system.IsAbs(workdir) {
			if (workdir[0] == os.PathSeparator) ||
				(len(workdir) > 1 && string(workdir[1]) != ":") ||
				(len(workdir) == 1) {
				workdir = filepath.Join(`C:\`, workdir)
			}
		} else {
			workdir = filepath.Join(current, workdir)
		}
		workdir = strings.ToUpper(string(workdir[0])) + workdir[1:] // Upper-case drive letter
	} else {
		if !filepath.IsAbs(workdir) {
			workdir = filepath.Join(string(os.PathSeparator), current, workdir)
		}
	}
	b.runConfig.WorkingDir = workdir

	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
Example #10
File: daemon.go Project: fntlnz/docker
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {

	// First perform verification of settings common across all platforms.
	if config != nil {
		if config.WorkingDir != "" {
			config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
			if !system.IsAbs(config.WorkingDir) {
				return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path", config.WorkingDir)
			}
		}

		if len(config.StopSignal) > 0 {
			_, err := signal.ParseSignal(config.StopSignal)
			if err != nil {
				return nil, err
			}
		}
	}

	if hostConfig == nil {
		return nil, nil
	}

	logCfg := daemon.getLogConfig(hostConfig.LogConfig)
	if err := logger.ValidateLogOpts(logCfg.Type, logCfg.Config); err != nil {
		return nil, err
	}

	for port := range hostConfig.PortBindings {
		_, portStr := nat.SplitProtoPort(string(port))
		if _, err := nat.ParsePort(portStr); err != nil {
			return nil, fmt.Errorf("Invalid port specification: %q", portStr)
		}
		for _, pb := range hostConfig.PortBindings[port] {
			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
			if err != nil {
				return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort)
			}
		}
	}

	// Now do platform-specific verification
	return verifyPlatformContainerSettings(daemon, hostConfig, config, update)
}
Example #11
// WORKDIR /tmp
//
// Set the working directory for future RUN/CMD/etc statements.
//
func workdir(b *Builder, args []string, attributes map[string]bool, original string) error {
	if len(args) != 1 {
		return derr.ErrorCodeExactlyOneArg.WithArgs("WORKDIR")
	}

	if err := b.flags.Parse(); err != nil {
		return err
	}

	// This is from the Dockerfile and will not necessarily be in
	// platform-specific semantics, hence ensure it is converted.
	workdir := filepath.FromSlash(args[0])

	if !system.IsAbs(workdir) {
		current := filepath.FromSlash(b.runConfig.WorkingDir)
		workdir = filepath.Join(string(os.PathSeparator), current, workdir)
	}

	b.runConfig.WorkingDir = workdir

	return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", workdir))
}
Example #12
func copyToContainer(ctx context.Context, dockerCli *client.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) {
	if srcPath != "-" {
		// Get an absolute source path.
		srcPath, err = resolveLocalPath(srcPath)
		if err != nil {
			return err
		}
	}

	// In order to get the copy behavior right, we need to know information
	// about both the source and destination. The API is a simple tar
	// archive/extract API but we can use the stat info header about the
	// destination to be more informed about exactly what the destination is.

	// Prepare destination copy info by stat-ing the container path.
	dstInfo := archive.CopyInfo{Path: dstPath}
	dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath)

	// If the destination is a symbolic link, we should evaluate it.
	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
		linkTarget := dstStat.LinkTarget
		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := archive.SplitPathDirEntry(dstPath)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		dstInfo.Path = linkTarget
		dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget)
	}

	// Ignore any error and assume that the parent directory of the destination
	// path exists, in which case the copy may still succeed. If there is any
	// type of conflict (e.g., non-directory overwriting an existing directory
	// or vice versa) the extraction will fail. If the destination simply did
	// not exist, but the parent directory does, the extraction will still
	// succeed.
	if err == nil {
		dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
	}

	var (
		content         io.Reader
		resolvedDstPath string
	)

	if srcPath == "-" {
		// Use STDIN.
		content = os.Stdin
		resolvedDstPath = dstInfo.Path
		if !dstInfo.IsDir {
			return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
		}
	} else {
		// Prepare source copy info.
		srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink)
		if err != nil {
			return err
		}

		srcArchive, err := archive.TarResource(srcInfo)
		if err != nil {
			return err
		}
		defer srcArchive.Close()

		// With the stat info about the local source as well as the
		// destination, we have enough information to know whether we need to
		// alter the archive that we upload so that when the server extracts
		// it to the specified directory in the container we get the desired
		// copy behavior.

		// See comments in the implementation of `archive.PrepareArchiveCopy`
		// for exactly what goes into deciding how and whether the source
		// archive needs to be altered for the correct copy behavior when it is
		// extracted. This function also infers from the source and destination
		// info which directory to extract to, which may be the parent of the
		// destination that the user specified.
		dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
		if err != nil {
			return err
		}
		defer preparedArchive.Close()

		resolvedDstPath = dstDir
		content = preparedArchive
	}

	options := types.CopyToContainerOptions{
		AllowOverwriteDirWithFile: false,
	}

	return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options)
}
Example #13
// verifyContainerSettings performs validation of the hostconfig and config
// structures.
func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {

	// First perform verification of settings common across all platforms.
	if config != nil {
		if config.WorkingDir != "" {
			config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
			if !system.IsAbs(config.WorkingDir) {
				return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir)
			}
		}

		if len(config.StopSignal) > 0 {
			_, err := signal.ParseSignal(config.StopSignal)
			if err != nil {
				return nil, err
			}
		}

		// Validate that Env does not contain any empty variables (e.g., ``, `=foo`)
		for _, env := range config.Env {
			if _, err := opts.ValidateEnv(env); err != nil {
				return nil, err
			}
		}
	}

	if hostConfig == nil {
		return nil, nil
	}

	if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
		return nil, fmt.Errorf("can't create 'AutoRemove' container with restart policy")
	}

	for port := range hostConfig.PortBindings {
		_, portStr := nat.SplitProtoPort(string(port))
		if _, err := nat.ParsePort(portStr); err != nil {
			return nil, fmt.Errorf("invalid port specification: %q", portStr)
		}
		for _, pb := range hostConfig.PortBindings[port] {
			_, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort))
			if err != nil {
				return nil, fmt.Errorf("invalid port specification: %q", pb.HostPort)
			}
		}
	}

	p := hostConfig.RestartPolicy

	switch p.Name {
	case "always", "unless-stopped", "no":
		if p.MaximumRetryCount != 0 {
			return nil, fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name)
		}
	case "on-failure":
		if p.MaximumRetryCount < 0 {
			return nil, fmt.Errorf("maximum retry count cannot be negative")
		}
	case "":
	// do nothing
	default:
		return nil, fmt.Errorf("invalid restart policy '%s'", p.Name)
	}

	// Now do platform-specific verification
	return verifyPlatformContainerSettings(daemon, hostConfig, config, update)
}
Example #14
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and generate its hash
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		times := make([]syscall.Timespec, 2)

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			mTime, err := http.ParseTime(lastMod)
			// If we can't parse it then just let it default to 'zero';
			// otherwise use the parsed time value
			if err == nil {
				times[1] = syscall.NsecToTimespec(mTime.UnixNano())
			}
		}

		if err := system.UtimesNano(tmpFileName, times); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := filepath.FromSlash(u.Path) // Ensure in platform semantics
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used.  However, we'll exclude the
		// root dir itself.  We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes.  This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
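
The directory branch at the end of calcCopyInfo derives a single cache key from the sorted per-file sums. A stdlib-only sketch of that scheme (a hypothetical standalone program, not the builder's own code; the sums shown are made up):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

// dirHash reproduces the "dir:" cache-key scheme shown above: sort the
// per-file sums, join them with commas, and hash the result.
func dirHash(fileSums []string) string {
	sort.Strings(fileSums)
	h := sha256.New()
	h.Write([]byte(strings.Join(fileSums, ",")))
	return "dir:" + hex.EncodeToString(h.Sum(nil))
}

func main() {
	fmt.Println(dirHash([]string{"sha256:bbb", "sha256:aaa"}))
}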
Example #15
File: internals.go Project: m1911/hyper
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached).  Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = stringutils.NewStrSlice("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = stringutils.NewStrSlice("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *stringutils.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	// Create the Pod
	podId := fmt.Sprintf("buildpod-%s", utils.RandStr(10, "alpha"))
	tempSrcDir := fmt.Sprintf("/var/run/hyper/temp/%s/", podId)
	if err := os.MkdirAll(tempSrcDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	if _, err := os.Stat(tempSrcDir); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	shellDir := fmt.Sprintf("/var/run/hyper/shell/%s/", podId)
	if err := os.MkdirAll(shellDir, 0755); err != nil {
		glog.Errorf(err.Error())
		return err
	}
	copyshell, err1 := os.Create(shellDir + "/exec-copy.sh")
	if err1 != nil {
		glog.Errorf(err1.Error())
		return err1
	}
	fmt.Fprintf(copyshell, "#!/bin/sh\n")

	podString, err := MakeCopyPod(podId, b.image, b.runConfig.WorkingDir, tempSrcDir, dest, shellDir)
	if err != nil {
		return err
	}
	err = b.Hyperdaemon.CreatePod(podId, podString, false)
	if err != nil {
		return err
	}
	// Get the container
	var (
		containerId string = ""
		container   *daemon.Container
	)

	ps, ok := b.Hyperdaemon.PodList.GetStatus(podId)
	if !ok {
		return fmt.Errorf("Cannot find pod %s", podId)
	}
	for _, i := range ps.Containers {
		containerId = i.Id
	}
	container, err = b.Daemon.Get(containerId)
	if err != nil {
		glog.Error(err.Error())
		return err
	}

	/*
		container, _, err := b.docker.Create(b.runConfig, nil)
		if err != nil {
			return err
		}
		defer b.docker.Unmount(container)
	*/
	b.tmpPods[podId] = struct{}{}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.Copy(container, tempSrcDir, info.FileInfo, info.decompress); err != nil {
			return err
		}
		if strings.HasSuffix(dest, string(os.PathSeparator)) {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", info.FileInfo.Name(), filepath.Join(dest, info.FileInfo.Name())))
		} else {
			fmt.Fprintf(copyshell, fmt.Sprintf("cp /tmp/src/%s %s\n", info.FileInfo.Name(), dest))
		}
	}

	fmt.Fprintf(copyshell, "umount /tmp/src/\n")
	fmt.Fprintf(copyshell, "umount /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/shell/\n")
	fmt.Fprintf(copyshell, "rm -rf /tmp/src/\n")
	copyshell.Close()
	// start or replace pod
	vm, ok := b.Hyperdaemon.VmList[b.Name]
	if !ok {
		glog.Warningf("can not find VM(%s)", b.Name)

		bo := &hypervisor.BootConfig{
			CPU:    1,
			Memory: 512,
			Kernel: b.Hyperdaemon.Kernel,
			Initrd: b.Hyperdaemon.Initrd,
			Bios:   b.Hyperdaemon.Bios,
			Cbfs:   b.Hyperdaemon.Cbfs,
			Vbox:   b.Hyperdaemon.VboxImage,
		}

		vm = b.Hyperdaemon.NewVm(b.Name, 1, 512, false, types.VM_KEEP_AFTER_FINISH)

		err = vm.Launch(bo)
		if err != nil {
			return err
		}

		b.Hyperdaemon.AddVm(vm)
	}
	if vm.Status == types.S_VM_IDLE {
		code, cause, err := b.Hyperdaemon.StartPod(podId, "", b.Name, nil, false, false, types.VM_KEEP_AFTER_FINISH, []*hypervisor.TtyIO{})
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		vm = b.Hyperdaemon.VmList[b.Name]
		// wait for cmd finish
		Status, err := vm.GetResponseChan()
		if err != nil {
			glog.Error(err.Error())
			return err
		}
		defer vm.ReleaseResponseChan(Status)

		var vmResponse *types.VmResponse
		for {
			vmResponse = <-Status
			if vmResponse.VmId == b.Name {
				if vmResponse.Code == types.E_POD_FINISHED {
					glog.Infof("Got E_POD_FINISHED code response")
					break
				}
			}
		}

		pod, ok := b.Hyperdaemon.PodList.Get(podId)
		if !ok {
			return fmt.Errorf("Cannot find pod %s", podId)
		}
		pod.SetVM(b.Name, vm)

		// release pod from VM
		glog.Warningf("start stop pod")
		code, cause, err = b.Hyperdaemon.StopPod(podId, "no")
		if err != nil {
			glog.Errorf("Code is %d, Cause is %s, %s", code, cause, err.Error())
			b.Hyperdaemon.KillVm(b.Name)
			return err
		}
		glog.Warningf("stop pod finish")
	} else {
		glog.Errorf("Vm is not IDLE")
		return fmt.Errorf("Vm is not IDLE")
	}

	if err := b.commit(container.ID, cmd, comment); err != nil {
		return err
	}
	return nil
}
Example #16
// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
// a result guaranteed to be contained within the scope `root`, at the time of the call.
// Symlinks in `root` are not evaluated and left as-is.
// Errors encountered while attempting to evaluate symlinks in path will be returned.
// Non-existing paths are valid and do not constitute an error.
// `path` has to contain `root` as a prefix, or else an error will be returned.
// Trying to break out from `root` does not constitute an error.
//
// Example:
//   If /foo/bar -> /outside,
//   FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
//
// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
// are created and not to subsequently create additional symlinks that could potentially make a
// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
// no longer be considered safely contained in "/foo".
func evalSymlinksInScope(path, root string) (string, error) {
	root = filepath.Clean(root)
	if path == root {
		return path, nil
	}
	if !strings.HasPrefix(path, root) {
		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
	}
	const maxIter = 255
	originalPath := path
	// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
	path = path[len(root):]
	if root == string(filepath.Separator) {
		path = string(filepath.Separator) + path
	}
	if !strings.HasPrefix(path, string(filepath.Separator)) {
		return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
	}
	path = filepath.Clean(path)
	// consume path by taking each frontmost path element,
	// expanding it if it's a symlink, and appending it to b
	var b bytes.Buffer
	// b here will always be considered to be the "current absolute path inside
	// root". When we append paths to it, we also append a slash, and we use
	// filepath.Clean after the loop to trim the trailing slash.
	for n := 0; path != ""; n++ {
		if n > maxIter {
			return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
		}

		// find next path component, p
		i := strings.IndexRune(path, filepath.Separator)
		var p string
		if i == -1 {
			p, path = path, ""
		} else {
			p, path = path[:i], path[i+1:]
		}

		if p == "" {
			continue
		}

		// this takes a b.String() like "b/../" and a p like "c" and turns it
		// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
		// root gets prepended and we Clean again (to remove any trailing slash
		// if the first Clean gave us just "/")
		cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
		if cleanP == string(filepath.Separator) {
			// never Lstat "/" itself
			b.Reset()
			continue
		}
		fullP := filepath.Clean(root + cleanP)

		fi, err := os.Lstat(fullP)
		if os.IsNotExist(err) {
			// if p does not exist, accept it
			b.WriteString(p)
			b.WriteRune(filepath.Separator)
			continue
		}
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			b.WriteString(p + string(filepath.Separator))
			continue
		}

		// it's a symlink, put it at the front of path
		dest, err := os.Readlink(fullP)
		if err != nil {
			return "", err
		}
		if system.IsAbs(dest) {
			b.Reset()
		}
		path = dest + string(filepath.Separator) + path
	}

	// see note above on "fullP := ..." for why this is double-cleaned and
	// what's happening here
	return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
}
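
A minimal Unix-only sketch of the behaviour documented in the comment above: a link that points outside `root` still resolves to a path inside `root`. It assumes a hypothetical symlink_test.go in the same package as evalSymlinksInScope (package name is an assumption).

package symlink // assumption: the package that contains evalSymlinksInScope above

import (
	"os"
	"path/filepath"
	"testing"
)

func TestEvalSymlinksInScopeStaysInRoot(t *testing.T) {
	root := t.TempDir()
	// root/bar -> /outside, i.e. a link that tries to escape the scope.
	link := filepath.Join(root, "bar")
	if err := os.Symlink("/outside", link); err != nil {
		t.Fatal(err)
	}

	got, err := evalSymlinksInScope(link, root)
	if err != nil {
		t.Fatal(err)
	}
	if want := filepath.Join(root, "outside"); got != want {
		t.Errorf("evalSymlinksInScope(%q, %q) = %q, want %q", link, root, got, want)
	}
}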
Example #17
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached).  Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = strslice.New("/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest))
	} else {
		b.runConfig.Cmd = strslice.New("cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest))
	}
	defer func(cmd *strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(dest) {
		hasSlash := strings.HasSuffix(dest, string(os.PathSeparator))
		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

		// Make sure we preserve any trailing slash
		if hasSlash {
			dest += string(os.PathSeparator)
		}
	}

	for _, info := range infos {
		if err := b.docker.BuilderCopy(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}
Example #18
func (cli *DockerCli) copyToContainer(srcPath, dstContainer, dstPath string) (err error) {
	if srcPath != "-" {
		// Get an absolute source path.
		srcPath, err = resolveLocalPath(srcPath)
		if err != nil {
			return err
		}
	}

	// In order to get the copy behavior right, we need to know information
	// about both the source and destination. The API is a simple tar
	// archive/extract API but we can use the stat info header about the
	// destination to be more informed about exactly what the destination is.

	// Prepare destination copy info by stat-ing the container path.
	dstInfo := archive.CopyInfo{Path: dstPath}
	dstStat, err := cli.statContainerPath(dstContainer, dstPath)

	// If the destination is a symbolic link, we should evaluate it.
	if err == nil && dstStat.Mode&os.ModeSymlink != 0 {
		linkTarget := dstStat.LinkTarget
		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := archive.SplitPathDirEntry(dstPath)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		dstInfo.Path = linkTarget
		dstStat, err = cli.statContainerPath(dstContainer, linkTarget)
	}

	// Ignore any error and assume that the parent directory of the destination
	// path exists, in which case the copy may still succeed. If there is any
	// type of conflict (e.g., non-directory overwriting an existing directory
	// or vice versa) the extraction will fail. If the destination simply did
	// not exist, but the parent directory does, the extraction will still
	// succeed.
	if err == nil {
		dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir()
	}

	var (
		content         io.Reader
		resolvedDstPath string
	)

	if srcPath == "-" {
		// Use STDIN.
		content = os.Stdin
		resolvedDstPath = dstInfo.Path
		if !dstInfo.IsDir {
			return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath))
		}
	} else {
		// Prepare source copy info.
		srcInfo, err := archive.CopyInfoSourcePath(srcPath)
		if err != nil {
			return err
		}

		srcArchive, err := archive.TarResource(srcInfo)
		if err != nil {
			return err
		}
		defer srcArchive.Close()

		// With the stat info about the local source as well as the
		// destination, we have enough information to know whether we need to
		// alter the archive that we upload so that when the server extracts
		// it to the specified directory in the container we get the desired
		// copy behavior.

		// See comments in the implementation of `archive.PrepareArchiveCopy`
		// for exactly what goes into deciding how and whether the source
		// archive needs to be altered for the correct copy behavior when it is
		// extracted. This function also infers from the source and destination
		// info which directory to extract to, which may be the parent of the
		// destination that the user specified.
		dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo)
		if err != nil {
			return err
		}
		defer preparedArchive.Close()

		resolvedDstPath = dstDir
		content = preparedArchive
	}

	query := make(url.Values, 2)
	query.Set("path", filepath.ToSlash(resolvedDstPath)) // Normalize the paths used in the API.
	// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
	query.Set("noOverwriteDirNonDir", "true")

	urlStr := fmt.Sprintf("/containers/%s/archive?%s", dstContainer, query.Encode())

	response, err := cli.stream("PUT", urlStr, &streamOpts{in: content})
	if err != nil {
		return err
	}
	defer response.body.Close()

	if response.statusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
	}

	return nil
}
Example #19
func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}

	if len(args) < 2 {
		return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName)
	}

	// Work in daemon-specific filepath semantics
	dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest

	b.runConfig.Image = b.image

	var infos []copyInfo

	// Loop through each src file and calculate the info we need to
	// do the copy (e.g. hash value if cached).  Don't actually do
	// the copy until we've looked at all src files
	var err error
	for _, orig := range args[0 : len(args)-1] {
		var fi builder.FileInfo
		decompress := allowLocalDecompression
		if urlutil.IsURL(orig) {
			if !allowRemote {
				return fmt.Errorf("Source can't be a URL for %s", cmdName)
			}
			fi, err = b.download(orig)
			if err != nil {
				return err
			}
			defer os.RemoveAll(filepath.Dir(fi.Path()))
			decompress = false
			infos = append(infos, copyInfo{fi, decompress})
			continue
		}
		// not a URL
		subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true)
		if err != nil {
			return err
		}

		infos = append(infos, subInfos...)
	}

	if len(infos) == 0 {
		return fmt.Errorf("No source files were specified")
	}
	if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) {
		return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
	}

	// For backwards compat, if there's just one info then use it as the
	// cache look-up string, otherwise hash 'em all into one
	var srcHash string
	var origPaths string

	if len(infos) == 1 {
		fi := infos[0].FileInfo
		origPaths = fi.Name()
		if hfi, ok := fi.(builder.Hashed); ok {
			srcHash = hfi.Hash()
		}
	} else {
		var hashs []string
		var origs []string
		for _, info := range infos {
			fi := info.FileInfo
			origs = append(origs, fi.Name())
			if hfi, ok := fi.(builder.Hashed); ok {
				hashs = append(hashs, hfi.Hash())
			}
		}
		hasher := sha256.New()
		hasher.Write([]byte(strings.Join(hashs, ",")))
		srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil))
		origPaths = strings.Join(origs, " ")
	}

	cmd := b.runConfig.Cmd
	if runtime.GOOS != "windows" {
		b.runConfig.Cmd = strslice.StrSlice{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, srcHash, dest)}
	} else {
		b.runConfig.Cmd = strslice.StrSlice{"cmd", "/S", "/C", fmt.Sprintf("REM (nop) %s %s in %s", cmdName, srcHash, dest)}
	}
	defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd)

	if hit, err := b.probeCache(); err != nil {
		return err
	} else if hit {
		return nil
	}

	container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig})
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest)

	// Twiddle the destination when it's a relative path - meaning, make it
	// relative to the WORKINGDIR

	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))

	if runtime.GOOS == "windows" {
		// On Windows, this is more complicated. We are guaranteed that the
		// WorkingDir is already platform consistent meaning in the format
		// UPPERCASEDriveLetter-Colon-Backslash-Foldername. However, Windows
		// for now also has the limitation that ADD/COPY can only be done to
		// the C: (system) drive, not any drives that might be present as a
		// result of bind mounts.
		//
		// So... if the path specified is Linux-style absolute (/foo or \\foo),
		// we assume it is the system drive. If it is a Windows-style absolute
		// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
		// strip any configured working directory's drive letter so that it
		// can be subsequently legitimately converted to a Windows volume-style
		// pathname.

		// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
		// we only want to validate where the DriveColon part has been supplied.
		if filepath.IsAbs(dest) {
			if strings.ToUpper(string(dest[0])) != "C" {
				return fmt.Errorf("Windows does not support %s with a destination not on the system drive (C:)", cmdName)
			}
			dest = dest[2:] // Strip the drive letter
		}

		// Cannot handle relative where WorkingDir is not the system drive.
		if len(b.runConfig.WorkingDir) > 0 {
			if !system.IsAbs(b.runConfig.WorkingDir[2:]) {
				return fmt.Errorf("Current WorkingDir %s is not platform consistent", b.runConfig.WorkingDir)
			}
			if !system.IsAbs(dest) {
				if string(b.runConfig.WorkingDir[0]) != "C" {
					return fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName)
				}

				dest = filepath.Join(string(os.PathSeparator), b.runConfig.WorkingDir[2:], dest)

				// Make sure we preserve any trailing slash
				if endsInSlash {
					dest += string(os.PathSeparator)
				}
			}
		}
	} else {
		if !system.IsAbs(dest) {
			dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.runConfig.WorkingDir), dest)

			// Make sure we preserve any trailing slash
			if endsInSlash {
				dest += string(os.PathSeparator)
			}
		}
	}

	for _, info := range infos {
		if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil {
			return err
		}
	}

	return b.commit(container.ID, cmd, comment)
}
Example #20
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different from CopyInfoSourcePath, which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}
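
A hypothetical caller sketch for the function above: stat the local destination before extracting an archive onto it. The fields used (Path, Exists, IsDir) come from the CopyInfo struct shown in these examples; the import path is an assumption and varies between Docker versions.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive" // assumed import path for the package above
)

func main() {
	info, err := archive.CopyInfoDestinationPath("/tmp/dest")
	if err != nil {
		// e.g. the parent directory is missing, or is not a directory.
		fmt.Println("cannot use destination:", err)
		return
	}
	switch {
	case !info.Exists:
		fmt.Println("destination does not exist yet; its parent does:", info.Path)
	case info.IsDir:
		fmt.Println("destination is an existing directory:", info.Path)
	default:
		fmt.Println("destination is an existing file:", info.Path)
	}
}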
Example #21
File: cp.go Project: rgl/docker
func (cli *DockerCli) copyFromContainer(srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) {
	if dstPath != "-" {
		// Get an absolute destination path.
		dstPath, err = resolveLocalPath(dstPath)
		if err != nil {
			return err
		}
	}

	// If the client requests to follow a symlink, we must first resolve the link target to decide which file to copy.
	var rebaseName string
	if cpParam.followLink {
		srcStat, err := cli.statContainerPath(srcContainer, srcPath)

		// If the source is a symbolic link, we should follow it.
		if err == nil && srcStat.Mode&os.ModeSymlink != 0 {
			linkTarget := srcStat.LinkTarget
			if !system.IsAbs(linkTarget) {
				// Join with the parent directory.
				srcParent, _ := archive.SplitPathDirEntry(srcPath)
				linkTarget = filepath.Join(srcParent, linkTarget)
			}

			linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget)
			srcPath = linkTarget
		}

	}

	query := make(url.Values, 1)
	query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.

	urlStr := fmt.Sprintf("/containers/%s/archive?%s", srcContainer, query.Encode())

	response, err := cli.call("GET", urlStr, nil, nil)
	if err != nil {
		return err
	}
	defer response.body.Close()

	if response.statusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode)
	}

	if dstPath == "-" {
		// Send the response to STDOUT.
		_, err = io.Copy(os.Stdout, response.body)

		return err
	}

	// In order to get the copy behavior right, we need to know information
	// about both the source and the destination. The response headers include
	// stat info about the source that we can use in deciding exactly how to
	// copy it locally. Along with the stat info about the local destination,
	// we have everything we need to handle the multiple possibilities there
	// can be when copying a file/dir from one location to another file/dir.
	stat, err := getContainerPathStatFromHeader(response.header)
	if err != nil {
		return fmt.Errorf("unable to get resource stat from response: %s", err)
	}

	// Prepare source copy info.
	srcInfo := archive.CopyInfo{
		Path:       srcPath,
		Exists:     true,
		IsDir:      stat.Mode.IsDir(),
		RebaseName: rebaseName,
	}

	preArchive := response.body
	if len(srcInfo.RebaseName) != 0 {
		_, srcBase := archive.SplitPathDirEntry(srcInfo.Path)
		preArchive = archive.RebaseArchiveEntries(response.body, srcBase, srcInfo.RebaseName)
	}
	// See comments in the implementation of `archive.CopyTo` for exactly what
	// goes into deciding how and whether the source archive needs to be
	// altered for the correct copy behavior.
	return archive.CopyTo(preArchive, srcInfo, dstPath)
}