Example 1
// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer() (arch archive.Archive, err error) {
	if img.graph == nil {
		return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
	}
	driver := img.graph.Driver()
	if differ, ok := driver.(graphdriver.Differ); ok {
		return differ.Diff(img.ID)
	}

	imgFs, err := driver.Get(img.ID, "")
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			driver.Put(img.ID)
		}
	}()

	if img.Parent == "" {
		archive, err := archive.Tar(imgFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return utils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			driver.Put(img.ID)
			return err
		}), nil
	}

	parentFs, err := driver.Get(img.Parent, "")
	if err != nil {
		return nil, err
	}
	defer driver.Put(img.Parent)
	changes, err := archive.ChangesDirs(imgFs, parentFs)
	if err != nil {
		return nil, err
	}
	archive, err := archive.ExportChanges(imgFs, changes)
	if err != nil {
		return nil, err
	}
	return utils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		driver.Put(img.ID)
		return err
	}), nil
}
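
This and the next example hinge on the same wrapper trick: the driver reference taken by Get is held until the caller closes the returned archive stream, so the layer cannot be unmounted while a reader is still draining it. A minimal stdlib-only sketch of that pattern (the readCloserWrapper type and newReadCloserWrapper helper are hypothetical stand-ins, not Docker's implementation):

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper runs a cleanup callback when Close is called,
// mirroring what utils.NewReadCloserWrapper does above.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	src := io.NopCloser(strings.NewReader("layer data"))
	rc := newReadCloserWrapper(src, func() error {
		err := src.Close()
		fmt.Println("releasing layer mount") // stand-in for driver.Put(img.ID)
		return err
	})
	io.Copy(io.Discard, rc)
	rc.Close()
}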
Example 2
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *naiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) {
	driver := gdw.ProtoDriver

	layerFs, err := driver.Get(id, "")
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			driver.Put(id)
		}
	}()

	if parent == "" {
		archive, err := archive.Tar(layerFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return ioutils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			driver.Put(id)
			return err
		}), nil
	}

	parentFs, err := driver.Get(parent, "")
	if err != nil {
		return nil, err
	}
	defer driver.Put(parent)

	changes, err := archive.ChangesDirs(layerFs, parentFs)
	if err != nil {
		return nil, err
	}

	archive, err := archive.ExportChanges(layerFs, changes)
	if err != nil {
		return nil, err
	}

	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		driver.Put(id)
		return err
	}), nil
}
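
Note the named return value err in Diff's signature: the deferred function inspects it and calls driver.Put(id) only when the function is exiting with an error, while the success paths hand that responsibility to the wrapper's Close. A small self-contained sketch of the named-return cleanup idiom (the acquire/release functions are hypothetical placeholders):

package main

import (
	"errors"
	"fmt"
)

func acquire() { fmt.Println("Get: reference taken") }
func release() { fmt.Println("Put: reference released") }

// open acquires a resource and, thanks to the named return value err,
// releases it automatically on any error path without repeating
// release() before every return.
func open(fail bool) (result string, err error) {
	acquire()
	defer func() {
		if err != nil {
			release()
		}
	}()

	if fail {
		return "", errors.New("diff failed")
	}
	// On success the caller owns the reference, as in Diff above.
	return "layer archive", nil
}

func main() {
	if _, err := open(true); err != nil {
		fmt.Println("error:", err)
	}
}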
Example 3
func (container *Container) Export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	archive, err := archive.Tar(container.basefs, archive.Uncompressed)
	if err != nil {
		container.Unmount()
		return nil, err
	}
	return utils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.Unmount()
		return err
	}), nil
}
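
The caller is expected to drain the returned stream and then close it, which fires the wrapped Unmount. A hedged usage sketch with a stand-in export function (exportStub is hypothetical; the real value would come from a Container):

package main

import (
	"io"
	"log"
	"os"
	"strings"
)

// exportStub stands in for container.Export(); any io.ReadCloser whose
// Close releases resources fits the same shape.
func exportStub() (io.ReadCloser, error) {
	return io.NopCloser(strings.NewReader("tar bytes")), nil
}

func main() {
	rc, err := exportStub()
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // triggers the wrapped Unmount in the real code

	f, err := os.Create("export.tar") // written to the current directory
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}
}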
Example 4
func testContextTar(t *testing.T, compression archive.Compression) {
	contextDirectory := filepath.Join(workingDirectory, "build_tests", "TestContextTar")
	context, err := archive.Tar(contextDirectory, compression)

	if err != nil {
		t.Fatalf("failed to build context tar: %v", err)
	}
	buildCmd := exec.Command(dockerBinary, "build", "-t", "contexttar", "-")
	buildCmd.Stdin = context

	out, exitCode, err := runCommandWithOutput(buildCmd)
	if err != nil || exitCode != 0 {
		t.Fatalf("build failed to complete: %v %v", out, err)
	}
	deleteImages("contexttar")
	logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression))
}
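
The test never writes the context to disk; it wires the tar stream straight into docker build - through the child process's stdin. A standalone sketch of that piping pattern, assuming a Unix wc on PATH as a stand-in for the docker binary:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	// A stand-in for the tar context stream produced by archive.Tar.
	context := strings.NewReader("pretend this is a tar archive")

	// Wire the stream into the child's stdin, exactly as the test does
	// with buildCmd.Stdin = context; "wc -c" just counts the bytes it reads.
	cmd := exec.Command("wc", "-c")
	cmd.Stdin = context
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatalf("command failed: %v: %s", err, out)
	}
	fmt.Printf("child read %s bytes\n", strings.TrimSpace(string(out)))
}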
Example 5
func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}
	tmp := strings.SplitN(args, " ", 2)
	if len(tmp) != 2 {
		return fmt.Errorf("Invalid %s format", cmdName)
	}

	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
	if err != nil {
		return err
	}

	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
	if err != nil {
		return err
	}

	cmd := b.config.Cmd
	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
	b.config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
		decompress = true
	)

	isRemote = utils.IsURL(orig)
	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be a URL for %s", cmdName)
	} else if isRemote {
		// Initiate the download
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
			tmpFile.Close()
			return err
		}
		tmpFile.Close()

		// Zero out the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.utilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container
	container, _, err := b.daemon.Create(b.config, "")
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if !allowDecompression || isRemote {
		decompress = false
	}
	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
		return err
	}
	return nil
}
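
The checksum step tars the downloaded file and drains the TarSum reader into ioutil.Discard; only after the stream is fully consumed is Sum meaningful. The same drain-then-sum shape works with any streaming hash. A minimal sketch with plain sha256 (tarsum itself is Docker-specific):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	r := strings.NewReader("downloaded file contents")

	// Tee the stream through the hasher while discarding the bytes,
	// mirroring io.Copy(ioutil.Discard, tarSum) above.
	h := sha256.New()
	if _, err := io.Copy(io.Discard, io.TeeReader(r, h)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("sha256:" + hex.EncodeToString(h.Sum(nil)))
}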
Example 6
func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)
	repoName, tag = parsers.ParseRepositoryTag(repoName)

	if remoteURL == "" {
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
	b := NewBuildFile(daemon, daemon.eng,
		&utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		&utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
	id, err := b.Build(context)
	if err != nil {
		return job.Error(err)
	}
	if repoName != "" {
		daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
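
For the plain-URL case the downloaded file is treated as a Dockerfile, and archive.Generate wraps it in a single-entry tar so the builder always receives a context archive. A stdlib-only sketch of that shape (the generate helper is a hypothetical stand-in for archive.Generate):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// generate wraps a single named file in an in-memory tar stream, the
// same shape archive.Generate("Dockerfile", contents) produces.
func generate(name, content string) (io.Reader, error) {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{
		Name: name,
		Mode: 0600,
		Size: int64(len(content)),
	}); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte(content)); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return &buf, nil
}

func main() {
	context, err := generate("Dockerfile", "FROM busybox\n")
	if err != nil {
		log.Fatal(err)
	}
	n, _ := io.Copy(io.Discard, context)
	fmt.Printf("generated a %d-byte context archive\n", n)
}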
Example 7
// CmdImageExport exports all images with the given tag. All versions
// carrying the same tag are exported. The resulting output is an
// uncompressed tarball.
// name is the tag to export.
// out is the writer to which the images are written.
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s IMAGE\n", job.Name)
	}
	name := job.Args[0]
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)

	utils.Debugf("Serializing %s", name)

	rootRepoMap := map[string]Repository{}
	rootRepo, err := s.Get(name)
	if err != nil {
		return job.Error(err)
	}
	if rootRepo != nil {
		// this is a base repo name, like 'busybox'

		for _, id := range rootRepo {
			if err := s.exportImage(job.Eng, id, tempdir); err != nil {
				return job.Error(err)
			}
		}
		rootRepoMap[name] = rootRepo
	} else {
		img, err := s.LookupImage(name)
		if err != nil {
			return job.Error(err)
		}
		if img != nil {
			// This is a named image like 'busybox:latest'
			repoName, repoTag := parsers.ParseRepositoryTag(name)
			if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
				return job.Error(err)
			}
			// check this length, because a lookup of a truncated hash will not have a tag
			// and will not need to be added to this map
			if len(repoTag) > 0 {
				rootRepoMap[repoName] = Repository{repoTag: img.ID}
			}
		} else {
			// this must be an image ID that could not be resolved to a repository or tag
			if err := s.exportImage(job.Eng, name, tempdir); err != nil {
				return job.Error(err)
			}
		}
	}
	// write repositories, if there is something to write
	if len(rootRepoMap) > 0 {
		rootRepoJson, _ := json.Marshal(rootRepoMap)

		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		utils.Debugf("There were no repositories to write")
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()

	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	utils.Debugf("End Serializing %s", name)
	return engine.StatusOK
}
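
archive.Tar(tempdir, archive.Uncompressed) turns the staging directory into the stream that is copied to job.Stdout. A stdlib-only sketch of the same directory-to-tar-stream step (the tarDir helper is hypothetical and skips non-regular files):

package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
	"path/filepath"
)

// tarDir streams dir as an uncompressed tar to w, a stand-in for
// archive.Tar(tempdir, archive.Uncompressed).
func tarDir(dir string, w io.Writer) error {
	tw := tar.NewWriter(w)
	defer tw.Close()

	return filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil || rel == "." {
			return err
		}
		hdr, err := tar.FileInfoHeader(fi, "")
		if err != nil {
			return err
		}
		hdr.Name = rel
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		if !fi.Mode().IsRegular() {
			return nil
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
}

func main() {
	dir, err := os.MkdirTemp("", "docker-export-")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	if err := os.WriteFile(filepath.Join(dir, "repositories"), []byte("{}"), 0644); err != nil {
		log.Fatal(err)
	}
	if err := tarDir(dir, os.Stdout); err != nil { // io.Copy(job.Stdout, fs) equivalent
		log.Fatal(err)
	}
}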
Example 8
// CmdImageExport exports all images with the given tags. All versions
// carrying the same tag are exported. The resulting output is an
// uncompressed tarball.
// name is the set of tags to export.
// out is the writer to which the images are written.
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
	if len(job.Args) < 1 {
		return job.Errorf("Usage: %s IMAGE [IMAGE...]\n", job.Name)
	}
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)

	rootRepoMap := map[string]Repository{}
	for _, name := range job.Args {
		log.Debugf("Serializing %s", name)
		rootRepo := s.Repositories[name]
		if rootRepo != nil {
			// this is a base repo name, like 'busybox'
			for _, id := range rootRepo {
				if _, ok := rootRepoMap[name]; !ok {
					rootRepoMap[name] = rootRepo
				} else {
					log.Debugf("Duplicate key [%s]", name)
					if rootRepoMap[name].Contains(rootRepo) {
						log.Debugf("skipping, because it is present [%s:%q]", name, rootRepo)
						continue
					}
					log.Debugf("updating [%s]: [%q] with [%q]", name, rootRepoMap[name], rootRepo)
					rootRepoMap[name].Update(rootRepo)
				}

				if err := s.exportImage(job.Eng, id, tempdir); err != nil {
					return job.Error(err)
				}
			}
		} else {
			img, err := s.LookupImage(name)
			if err != nil {
				return job.Error(err)
			}

			if img != nil {
				// This is a named image like 'busybox:latest'
				repoName, repoTag := parsers.ParseRepositoryTag(name)

				// check this length, because a lookup of a truncated hash will not have a tag
				// and will not need to be added to this map
				if len(repoTag) > 0 {
					if _, ok := rootRepoMap[repoName]; !ok {
						rootRepoMap[repoName] = Repository{repoTag: img.ID}
					} else {
						log.Debugf("Duplicate key [%s]", repoName)
						newRepo := Repository{repoTag: img.ID}
						if rootRepoMap[repoName].Contains(newRepo) {
							log.Debugf("skipping, because it is present [%s:%q]", repoName, newRepo)
							continue
						}
						log.Debugf("updating [%s]: [%q] with [%q]", repoName, rootRepoMap[repoName], newRepo)
						rootRepoMap[repoName].Update(newRepo)
					}
				}
				if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
					return job.Error(err)
				}

			} else {
				// this must be an image ID that could not be resolved to a repository or tag
				if err := s.exportImage(job.Eng, name, tempdir); err != nil {
					return job.Error(err)
				}
			}
		}
		log.Debugf("End Serializing %s", name)
	}
	// write repositories, if there is something to write
	if len(rootRepoMap) > 0 {
		rootRepoJson, _ := json.Marshal(rootRepoMap)
		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		log.Debugf("There were no repositories to write")
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()

	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	log.Debugf("End export job: %s", job.Name)
	return engine.StatusOK
}
Example 9
func calcCopyInfo(b *Builder, cmdName string, ci *copyInfo, allowRemote bool, allowDecompression bool) error {
	var (
		remoteHash string
		isRemote   bool
	)

	saveOrig := ci.origPath
	isRemote = utils.IsURL(ci.origPath)

	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be an URL for %s", cmdName)
	} else if isRemote {
		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Zero out the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(saveOrig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}
	}

	if err := b.checkPathForAddition(ci.origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.UtilizeCache {
		var (
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			ci.hashPath = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, ci.origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for _, fileInfo := range sums {
				absFile := path.Join(b.contextPath, fileInfo.Name())
				absOrigPath := path.Join(b.contextPath, ci.origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, fileInfo.Sum())
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			ci.hashPath = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if ci.origPath[0] == '/' && len(ci.origPath) > 1 {
				ci.origPath = ci.origPath[1:]
			}
			ci.origPath = strings.TrimPrefix(ci.origPath, "./")
			// This will match on the first file in sums of the archive
			if fis := sums.GetFile(ci.origPath); fis != nil {
				ci.hashPath = "file:" + fis.Sum()
			}
		}

	}

	if !allowDecompression || isRemote {
		ci.decompress = false
	}
	return nil
}
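
For directories, the cache key is derived from the per-file tarsums: collect the sums of every file under the path, sort them so the result is deterministic, and hash the joined list. A compact runnable sketch of exactly that aggregation (the sum values are made up; the real ones come from b.context.GetSums()):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical per-file tarsums for entries under the copied directory.
	subfiles := []string{"sum-b", "sum-a", "sum-c"}

	// Sort so the aggregate hash does not depend on iteration order,
	// then hash the joined sums, as calcCopyInfo does above.
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	fmt.Println("dir:" + hex.EncodeToString(hasher.Sum(nil)))
}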
Example 10
func calcCopyInfo(b *Builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool) error {

	if origPath != "" && origPath[0] == '/' && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "./")

	// In the remote/URL case, download it and gen its hashcode
	if utils.IsURL(origPath) {
		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := utils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, utils.ProgressReader(resp.Body, int(resp.ContentLength), b.OutOld, b.StreamFormatter, true, "", "Downloading")); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Zero out the mtime of the newly created tmp file
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		ci.origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, "/") {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, only if we're using the cache
		if b.UtilizeCache {
			r, err := archive.Tar(tmpFileName, archive.Uncompressed)
			if err != nil {
				return err
			}
			tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version0)
			if err != nil {
				return err
			}
			if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
				return err
			}
			ci.hash = tarSum.Sum(nil)
			r.Close()
		}

		return nil
	}

	// Deal with wildcards
	if ContainsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := path.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression)
		}
		return nil
	}

	// Must be a dir or a file

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(path.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// If not using cache don't need to do anything else.
	// If we are using a cache then calc the hash for the src file/dir
	if !b.UtilizeCache {
		return nil
	}

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := path.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, "/") {
		absOrigPath += "/"
	}

	// Need path w/o / too to find matching dir w/o trailing /
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := path.Join(b.contextPath, fileInfo.Name())
		if strings.HasPrefix(absFile, absOrigPath) || absFile == absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
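
The wildcard branch expands a pattern against the context's file list with path.Match and recurses once per match, so each matched file gets its own copyInfo and hash. A small sketch of just the matching step (the file names are hypothetical):

package main

import (
	"fmt"
	"path"
)

func main() {
	// Context entries, standing in for the names from b.context.GetSums().
	files := []string{"main.go", "main_test.go", "docs/readme.md"}

	// path.Match applies shell-style patterns; note that "*" does not
	// cross "/" boundaries, so nested files are not matched here.
	for _, name := range files {
		if match, _ := path.Match("*.go", name); match {
			fmt.Println("would copy:", name)
		}
	}
}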