Example #1
// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		if options.IncludeFiles == nil {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		var renamedRelFilePath string // For when options.Name is set
		for _, include := range options.IncludeFiles {
			filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the root path. Skip in both situations.
					return nil
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s", relFilePath, err)
						return err
					}
				}

				if skip {
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource
				if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
					renamedRelFilePath = relFilePath
				}
				// Set this to make sure the items underneath also get renamed
				if options.Name != "" {
					relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close tar writer: %s", err)
		}
		if err := compressWriter.Close(); err != nil {
			logrus.Debugf("Can't close compress writer: %s", err)
		}
		if err := pipeWriter.Close(); err != nil {
			logrus.Debugf("Can't close pipe writer: %s", err)
		}
	}()

	return pipeReader, nil
}
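
A minimal sketch of how the function above might be called, assuming it lives in Docker's archive package (the import path, source directory, and output file are illustrative):

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive" // assumed import path
)

func main() {
	// Stream a directory into a gzip-compressed tar, excluding VCS metadata.
	rc, err := archive.TarWithOptions("/tmp/build-context", &archive.TarOptions{
		Compression:     archive.Gzip,
		ExcludePatterns: []string{".git", "*.tmp"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := os.Create("/tmp/context.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Copying from the returned reader drives the goroutine that writes
	// the archive into the pipe.
	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}

Reading the pipe to completion matters: the writer goroutine blocks until the returned reader is consumed or closed.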
Example #2
// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	srcPath = fixVolumePathPrefix(srcPath)

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter:         tar.NewWriter(compressWriter),
			Buffer:            pools.BufioWriter32KPool.Get(nil),
			SeenFiles:         make(map[uint64]string),
			UIDMaps:           options.UIDMaps,
			GIDMaps:           options.GIDMaps,
			WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Errorf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Errorf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Errorf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			walkRoot := getWalkRoot(srcPath, include)
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Errorf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					// If we want to skip this file and it's a directory
					// then we should first check to see if there's an
					// excludes pattern (e.g. !dir/file) that starts with this
					// dir. If so then we can't skip this dir.

					// It's not a dir, so we can just return/skip.
					if !f.IsDir() {
						return nil
					}

					// No exceptions (!...) in patterns so just skip dir
					if !exceptions {
						return filepath.SkipDir
					}

					dirSlash := relFilePath + string(filepath.Separator)

					for _, pat := range patterns {
						if pat[0] != '!' {
							continue
						}
						pat = pat[1:] + string(filepath.Separator)
						if strings.HasPrefix(pat, dirSlash) {
							// found a match - so can't skip this dir
							return nil
						}
					}

					// No matching exclusion dir so just skip dir
					return filepath.SkipDir
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
					// if pipe is broken, stop writing tar stream to it
					if err == io.ErrClosedPipe {
						return err
					}
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
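
The trickiest branch above is the one that refuses to skip an excluded directory when an exception pattern points inside it. A standalone sketch of that rule (not the library's API; slash-separated patterns as in .dockerignore assumed):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// shouldDescend reports whether a walk must still enter an excluded directory
// because some exception pattern ("!...") targets a path under it.
func shouldDescend(dir string, patterns []string) bool {
	dirSlash := dir + string(filepath.Separator)
	for _, pat := range patterns {
		if !strings.HasPrefix(pat, "!") {
			continue
		}
		pat = pat[1:] + string(filepath.Separator)
		if strings.HasPrefix(pat, dirSlash) {
			return true // an exception lives under this dir, keep walking
		}
	}
	return false
}

func main() {
	patterns := []string{"vendor", "!vendor/keep"}
	fmt.Println(shouldDescend("vendor", patterns)) // true: vendor/keep must be rescued
	fmt.Println(shouldDescend("docs", patterns))   // false: nothing to rescue inside
}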
Example #3
func listFiles(srcPath string, includes, excludes []string, cmdName string, urlFetcher URLFetcher) ([]*uploadFile, error) {

	log.Debugf("searching patterns, %# v\n", pretty.Formatter(includes))

	result := []*uploadFile{}
	seen := map[string]struct{}{}

	// TODO: support local archives (and maybe remote archives as well)

	excludes, patDirs, exceptions, err := fileutils.CleanPatterns(excludes)
	if err != nil {
		return nil, err
	}

	// TODO: here we remove some exclude patterns, how about patDirs?
	excludes, nestedPatterns := findNestedPatterns(excludes)

	for _, pattern := range includes {

		if isURL(pattern) {
			if cmdName == "COPY" {
				return nil, fmt.Errorf("can't use url in COPY command: '%s'", pattern)
			}

			if urlFetcher == nil {
				return nil, fmt.Errorf("want to list a downloaded url '%s', but URLFetcher is not present", pattern)
			}

			ui, err := urlFetcher.GetInfo(pattern)
			if err != nil {
				return nil, err
			}

			result = append(result, &uploadFile{
				src:  ui.FileName,
				dest: ui.BaseName,
				size: ui.Size,
			})
			continue
		}

		matches, err := filepath.Glob(filepath.Join(srcPath, pattern))
		if err != nil {
			return result, err
		}

		for _, match := range matches {

			// We need to check if the current match is a dir
			// to prefix files inside with it
			matchInfo, err := os.Stat(match)
			if err != nil {
				return result, err
			}

			// Walk through each match since it may be a directory
			err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error {

				if err != nil {
					return err
				}

				relFilePath, err := filepath.Rel(srcPath, path)
				if err != nil {
					return err
				}

				// TODO: ensure ignoring works correctly, maybe improve .dockerignore to work more like .gitignore?

				skip := false
				skipNested := false

				// Here we want to keep files that are specified explicitly in the includes,
				// no matter what. For example, .dockerignore can have some wildcard items
				// specified, but in COPY we want to explicitly add a file that could
				// otherwise be ignored by a wildcard or directory COPY
				if pattern != relFilePath {
					if skip, err = fileutils.OptimizedMatches(relFilePath, excludes, patDirs); err != nil {
						return err
					}
					if skipNested, err = matchNested(relFilePath, nestedPatterns); err != nil {
						return err
					}
				}

				if skip || skipNested {
					if !exceptions && info.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				// TODO: read links?

				// not interested in dirs, since we walk already
				if info.IsDir() {
					return nil
				}

				if _, ok := seen[relFilePath]; ok {
					return nil
				}
				seen[relFilePath] = struct{}{}

				// cut the wildcard prefix off the file path, or use the base name

				var (
					resultFilePath string
					baseChunks     = splitPath(pattern)
					destChunks     = splitPath(relFilePath)
					lastChunk      = baseChunks[len(baseChunks)-1]
				)

				if containsWildcards(lastChunk) {
					// In case the source path is `foo/bar/*`, we need to make the
					// destination paths without the `foo/bar/` prefix
					resultFilePath = filepath.Join(destChunks[len(baseChunks)-1:]...)
				} else if matchInfo.IsDir() {
					// If source is a directory, keep as is
					resultFilePath = relFilePath
				} else {
					// The source has referred to a file
					resultFilePath = filepath.Base(relFilePath)
				}

				result = append(result, &uploadFile{
					src:  path,
					dest: resultFilePath,
					size: info.Size(),
				})

				return nil
			})

			if err != nil {
				return result, err
			}
		}
	}

	return result, nil
}
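
The destination-path arithmetic near the end of the walk is easier to see in isolation. A standalone sketch under the assumption that splitPath and containsWildcards behave like the simple stand-ins below (the real package helpers may handle escaping differently):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func splitPath(p string) []string { return strings.Split(filepath.ToSlash(p), "/") }

func containsWildcards(s string) bool { return strings.ContainsAny(s, "*?[") }

// destFor mirrors the branch in listFiles: wildcard sources drop their fixed
// prefix, directory sources keep the relative path, and plain files keep only
// the base name.
func destFor(pattern, relFilePath string, srcIsDir bool) string {
	baseChunks := splitPath(pattern)
	destChunks := splitPath(relFilePath)
	lastChunk := baseChunks[len(baseChunks)-1]

	switch {
	case containsWildcards(lastChunk):
		return filepath.Join(destChunks[len(baseChunks)-1:]...)
	case srcIsDir:
		return relFilePath
	default:
		return filepath.Base(relFilePath)
	}
}

func main() {
	fmt.Println(destFor("foo/bar/*", "foo/bar/app/main.go", false)) // app/main.go
	fmt.Println(destFor("foo", "foo/config.yml", true))             // foo/config.yml
	fmt.Println(destFor("foo/readme.md", "foo/readme.md", false))   // readme.md
}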
Example #4
// TarWithOptions creates an archive from the directory at `srcPath`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

	patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns)

	if err != nil {
		return nil, err
	}

	pipeReader, pipeWriter := io.Pipe()

	compressWriter, err := CompressStream(pipeWriter, options.Compression)
	if err != nil {
		return nil, err
	}

	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(compressWriter),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
		}

		defer func() {
			// Make sure to check the error on Close.
			if err := ta.TarWriter.Close(); err != nil {
				logrus.Debugf("Can't close tar writer: %s", err)
			}
			if err := compressWriter.Close(); err != nil {
				logrus.Debugf("Can't close compress writer: %s", err)
			}
			if err := pipeWriter.Close(); err != nil {
				logrus.Debugf("Can't close pipe writer: %s", err)
			}
		}()

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this

		stat, err := os.Lstat(srcPath)
		if err != nil {
			return
		}

		if !stat.IsDir() {
			// We can't later join a non-dir with any includes because the
			// 'walk' will error if "file/." is stat-ed and "file" is not a
			// directory. So, we must split the source path and use the
			// basename as the include.
			if len(options.IncludeFiles) > 0 {
				logrus.Warn("Tar: Can't archive a file with includes")
			}

			dir, base := SplitPathDirEntry(srcPath)
			srcPath = dir
			options.IncludeFiles = []string{base}
		}

		if len(options.IncludeFiles) == 0 {
			options.IncludeFiles = []string{"."}
		}

		seen := make(map[string]bool)

		for _, include := range options.IncludeFiles {
			rebaseName := options.RebaseNames[include]

			// We can't use filepath.Join(srcPath, include) because this will
			// clean away a trailing "." or "/" which may be important.
			walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator))
			filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
				if err != nil {
					logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
					return nil
				}

				relFilePath, err := filepath.Rel(srcPath, filePath)
				if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) {
					// Error getting relative path OR we are looking
					// at the source directory path. Skip in both situations.
					return nil
				}

				if options.IncludeSourceDir && include == "." && relFilePath != "." {
					relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator))
				}

				skip := false

				// If "include" is an exact match for the current file
				// then even if there's an "excludePatterns" pattern that
				// matches it, don't skip it. IOW, assume an explicit 'include'
				// is asking for that file no matter what - which is true
				// for some files, like .dockerignore and Dockerfile (sometimes)
				if include != relFilePath {
					skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
					if err != nil {
						logrus.Debugf("Error matching %s: %v", relFilePath, err)
						return err
					}
				}

				if skip {
					if !exceptions && f.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				if seen[relFilePath] {
					return nil
				}
				seen[relFilePath] = true

				// Rename the base resource.
				if rebaseName != "" {
					var replacement string
					if rebaseName != string(filepath.Separator) {
						// Special case the root directory to replace with an
						// empty string instead so that we don't end up with
						// double slashes in the paths.
						replacement = rebaseName
					}

					relFilePath = strings.Replace(relFilePath, include, replacement, 1)
				}

				if err := ta.addTarFile(filePath, relFilePath); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
				}
				return nil
			})
		}
	}()

	return pipeReader, nil
}
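
All of the examples lean on the same two fileutils helpers. A hedged usage sketch, with the signatures taken from the calls above and the import path assumed to be Docker's pkg/fileutils:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils" // assumed import path
)

func main() {
	patterns, patDirs, exceptions, err := fileutils.CleanPatterns([]string{
		"node_modules",
		"*.log",
		"!keep.log",
	})
	if err != nil {
		log.Fatal(err)
	}
	// exceptions is true because one pattern starts with "!".
	fmt.Println("has exceptions:", exceptions)

	for _, rel := range []string{"node_modules/a.js", "build.log", "keep.log", "main.go"} {
		skip, err := fileutils.OptimizedMatches(rel, patterns, patDirs)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-20s skip=%v\n", rel, skip)
	}
}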
Example #5
func listFiles(srcPath string, includes, excludes []string) ([]*uploadFile, error) {

	result := []*uploadFile{}
	seen := map[string]struct{}{}

	// TODO: support urls
	// TODO: support local archives (and maybe remote archives as well)

	excludes, patDirs, exceptions, err := fileutils.CleanPatterns(excludes)
	if err != nil {
		return nil, err
	}

	// TODO: here we remove some exclude patterns, how about patDirs?
	excludes, nestedPatterns := findNestedPatterns(excludes)

	for _, pattern := range includes {

		matches, err := filepath.Glob(filepath.Join(srcPath, pattern))
		if err != nil {
			return result, err
		}

		for _, match := range matches {

			// We need to check if the current match is a dir
			// to prefix files inside with it
			matchInfo, err := os.Stat(match)
			if err != nil {
				return result, err
			}

			// Walk through each match since it may be a directory
			err = filepath.Walk(match, func(path string, info os.FileInfo, err error) error {

				// info may be nil when the walk reports an error, so bail out
				// before touching it
				if err != nil {
					return err
				}

				relFilePath, err := filepath.Rel(srcPath, path)
				if err != nil {
					return err
				}

				// TODO: ensure ignoring works correctly, maybe improve .dockerignore to work more like .gitignore?

				skip := false
				skipNested := false

				// Here we want to keep files that are specified explicitly in the includes,
				// no matter what. For example, .dockerignore can have some wildcard items
				// specified, but in COPY we want to explicitly add a file that could
				// otherwise be ignored by a wildcard or directory COPY
				if pattern != relFilePath {
					if skip, err = fileutils.OptimizedMatches(relFilePath, excludes, patDirs); err != nil {
						return err
					}
					if skipNested, err = matchNested(relFilePath, nestedPatterns); err != nil {
						return err
					}
				}

				if skip || skipNested {
					if !exceptions && info.IsDir() {
						return filepath.SkipDir
					}
					return nil
				}

				// TODO: read links?

				// not interested in dirs, since we walk already
				if info.IsDir() {
					return nil
				}

				// skip symlinks, which may point to non-existent files,
				// and named pipes, because they hang on open
				if info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 {
					return nil
				}

				if _, ok := seen[relFilePath]; ok {
					return nil
				}
				seen[relFilePath] = struct{}{}

				// cut the wildcard prefix off the file path, or use the base name

				var (
					resultFilePath string
					baseChunks     = splitPath(pattern)
					destChunks     = splitPath(relFilePath)
					lastChunk      = baseChunks[len(baseChunks)-1]
				)

				if containsWildcards(lastChunk) {
					// In case the source path is `foo/bar/*`, we need to make the
					// destination paths without the `foo/bar/` prefix
					resultFilePath = filepath.Join(destChunks[len(baseChunks)-1:]...)
				} else if matchInfo.IsDir() {
					// If source is a directory, keep as is
					resultFilePath = relFilePath
				} else {
					// The source has referred to a file
					resultFilePath = filepath.Base(relFilePath)
				}

				result = append(result, &uploadFile{
					src:     path,
					dest:    resultFilePath,
					relDest: relFilePath,
					size:    info.Size(),
				})

				return nil
			})

			if err != nil {
				return result, err
			}
		}
	}

	return result, nil
}
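
The mode-bit filter added in this version is worth isolating: filepath.Walk reports symlinks without following them, so their own mode bits are visible to the callback. A standalone sketch of the same check:

package main

import (
	"fmt"
	"os"
)

// uploadable reports whether a walked entry should be sent: directories are
// handled by the walk itself, symlinks are skipped so dangling targets are
// never opened, and named pipes are skipped because opening them blocks.
func uploadable(info os.FileInfo) bool {
	if info.IsDir() {
		return false
	}
	return info.Mode()&(os.ModeSymlink|os.ModeNamedPipe) == 0
}

func main() {
	// Lstat, like filepath.Walk, does not follow symlinks, so a link reports
	// os.ModeSymlink here. The path is just an example.
	info, err := os.Lstat("/etc/hosts")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(uploadable(info))
}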