Example #1
0
// MakeTarSumContext returns a build Context from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// It is the caller's responsibility to close tarStream.
func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) {
	root, err := ioutils.TempDir("", "docker-builder")
	if err != nil {
		return nil, err
	}

	tsc := &tarSumContext{root: root}

	// Make sure we clean up on error. In the happy case the caller
	// is expected to manage the clean-up.
	defer func() {
		if err != nil {
			tsc.Close()
		}
	}()

	decompressedStream, err := archive.DecompressStream(tarStream)
	if err != nil {
		return nil, err
	}

	sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
	if err != nil {
		return nil, err
	}

	if err := chrootarchive.Untar(sum, root, nil); err != nil {
		return nil, err
	}

	tsc.sums = sum.GetSums()

	return tsc, nil
}
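A minimal caller-side sketch of the contract in the doc comment above: the caller owns the tar stream, while closing the returned Context removes the temporary extraction folder. The package clause and the helper itself are assumptions for illustration only.
package builder // assumed to be the package that defines MakeTarSumContext

import (
	"log"
	"os"
)

// buildContextFromTarFile is a hypothetical helper showing the expected
// lifecycle: the caller opens and closes the tar stream, and closes the
// Context to delete the temporary folder it was extracted into.
func buildContextFromTarFile(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close() // closing the tar stream is the caller's responsibility

	ctx, err := MakeTarSumContext(f)
	if err != nil {
		return err
	}
	defer ctx.Close() // deletes the temporary extraction folder

	log.Printf("build context extracted and tarsummed")
	// ... use ctx for further build operations ...
	return nil
}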
Example #2
0
func (b *builder) readContext(context io.Reader) (err error) {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return
	}

	// Make sure we clean up on error. In the happy case the caller
	// is expected to manage the clean-up.
	defer func() {
		if err != nil {
			if e := os.RemoveAll(tmpdirPath); e != nil {
				logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e)
			}
		}
	}()

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil {
		return
	}

	if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return
	}

	b.contextPath = tmpdirPath
	return
}
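readContext relies on Go's named return value: because err is named in the signature, every later assignment to it is visible to the deferred closure, which removes the temporary directory only when the function returns an error. A stripped-down sketch of that idiom with hypothetical names:
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// prepareWorkDir illustrates the cleanup idiom used above: the deferred
// closure sees the final value of the named return err, so the directory is
// removed only on failure; on success the caller owns the clean-up.
func prepareWorkDir() (dir string, err error) {
	dir, err = ioutil.TempDir("", "example-")
	if err != nil {
		return "", err
	}
	defer func() {
		if err != nil {
			os.RemoveAll(dir)
		}
	}()

	// Any error returned from here on triggers the deferred removal.
	err = ioutil.WriteFile(filepath.Join(dir, "marker"), []byte("ok"), 0600)
	if err != nil {
		return "", err
	}
	return dir, nil
}

func main() {
	dir, err := prepareWorkDir()
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	defer os.RemoveAll(dir) // happy path: the caller cleans up
	fmt.Println("work dir:", dir)
}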
Example #3
0
func (pm *Manager) createFromContext(ctx context.Context, pluginID, pluginDir string, tarCtx io.Reader, options *types.PluginCreateOptions) error {
	if err := chrootarchive.Untar(tarCtx, pluginDir, nil); err != nil {
		return err
	}

	repoName := options.RepoName
	ref, err := distribution.GetRef(repoName)
	if err != nil {
		return err
	}
	name := ref.Name()
	tag := distribution.GetTag(ref)

	p := v2.NewPlugin(name, pluginID, pm.runRoot, pm.libRoot, tag)
	if err := p.InitPlugin(); err != nil {
		return err
	}

	if err := pm.pluginStore.Add(p); err != nil {
		return err
	}

	pm.pluginEventLogger(p.GetID(), repoName, "create")

	return nil
}
Example #4
0
func (pm *Manager) createFromContext(ctx context.Context, tarCtx io.Reader, pluginDir, repoName string, p *v2.Plugin) error {
	if err := chrootarchive.Untar(tarCtx, pluginDir, nil); err != nil {
		return err
	}

	if err := p.InitPlugin(); err != nil {
		return err
	}

	if err := pm.pluginStore.Add(p); err != nil {
		return err
	}

	pm.pluginEventLogger(p.GetID(), repoName, "create")

	return nil
}
Example #5
0
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
Example #6
0
// Load loads a set of images into the repository. This is the complement of ImageExport.
// The input stream is an uncompressed tarball containing images and metadata.
func (s *TagStore) Load(inTar io.ReadCloser, outStream io.Writer) error {
	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpImageDir)

	var (
		repoDir = filepath.Join(tmpImageDir, "repo")
	)

	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
		return err
	}
	images := s.graph.Map()
	excludes := make([]string, len(images))
	i := 0
	for k := range images {
		excludes[i] = k
		i++
	}
	if err := chrootarchive.Untar(inTar, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
		return err
	}

	dirs, err := ioutil.ReadDir(repoDir)
	if err != nil {
		return err
	}

	for _, d := range dirs {
		if d.IsDir() {
			if err := s.recursiveLoad(d.Name(), tmpImageDir); err != nil {
				return err
			}
		}
	}

	reposJSONFile, err := os.Open(filepath.Join(tmpImageDir, "repo", "repositories"))
	if err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		return nil
	}
	defer reposJSONFile.Close()

	repositories := map[string]Repository{}
	if err := json.NewDecoder(reposJSONFile).Decode(&repositories); err != nil {
		return err
	}

	for imageName, tagMap := range repositories {
		for tag, address := range tagMap {
			if err := s.setLoad(imageName, tag, address, true, outStream); err != nil {
				return err
			}
		}
	}

	return nil
}
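The ExcludePatterns above skip top-level entries named after image IDs that are already in the graph, so re-loading an export does not re-extract layers the daemon already has. The same call in isolation (the file name, destination directory, and image ID are placeholders; the import paths assume the Docker tree these examples come from):
package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	in, err := os.Open("images.tar") // placeholder: output of `docker save`
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// Entries matching these patterns are skipped during extraction, the way
	// Load excludes image IDs that are already present.
	alreadyLoaded := []string{"4a419f2bc8d2"} // hypothetical image ID

	opts := &archive.TarOptions{ExcludePatterns: alreadyLoaded}
	if err := chrootarchive.Untar(in, "/tmp/docker-load-repo", opts); err != nil {
		log.Fatal(err)
	}
}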
Example #7
0
func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
	var (
		sf             = streamformatter.NewJSONStreamFormatter()
		progressOutput progress.Output
	)
	if !quiet {
		progressOutput = sf.NewProgressOutput(outStream, false)
		outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()}
	}

	tmpDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
		return err
	}
	// Read the manifest; if the file does not exist, fall back to legacy loading.
	manifestPath, err := safePath(tmpDir, manifestFileName)
	if err != nil {
		return err
	}
	manifestFile, err := os.Open(manifestPath)
	if err != nil {
		if os.IsNotExist(err) {
			return l.legacyLoad(tmpDir, outStream, progressOutput)
		}
		return err
	}
	defer manifestFile.Close()

	var manifest []manifestItem
	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
		return err
	}

	var parentLinks []parentLink

	for _, m := range manifest {
		configPath, err := safePath(tmpDir, m.Config)
		if err != nil {
			return err
		}
		config, err := ioutil.ReadFile(configPath)
		if err != nil {
			return err
		}
		img, err := image.NewFromJSON(config)
		if err != nil {
			return err
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = nil

		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
			return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual)
		}

		for i, diffID := range img.RootFS.DiffIDs {
			layerPath, err := safePath(tmpDir, m.Layers[i])
			if err != nil {
				return err
			}
			r := rootFS
			r.Append(diffID)
			newLayer, err := l.ls.Get(r.ChainID())
			if err != nil {
				newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), progressOutput)
				if err != nil {
					return err
				}
			}
			defer layer.ReleaseAndLog(l.ls, newLayer)
			if expected, actual := diffID, newLayer.DiffID(); expected != actual {
				return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual)
			}
			rootFS.Append(diffID)
		}

		imgID, err := l.is.Create(config)
		if err != nil {
			return err
		}

		for _, repoTag := range m.RepoTags {
			named, err := reference.ParseNamed(repoTag)
			if err != nil {
				return err
			}
			ref, ok := named.(reference.NamedTagged)
			if !ok {
				return fmt.Errorf("invalid tag %q", repoTag)
			}
			l.setLoadedTag(ref, imgID, outStream)
		}

		parentLinks = append(parentLinks, parentLink{imgID, m.Parent})
		l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load")
	}

	for _, p := range validatedParentLinks(parentLinks) {
		if p.parentID != "" {
			if err := l.setParentID(p.id, p.parentID); err != nil {
				return err
			}
		}
	}

	return nil
}
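The manifest decoded above is a JSON array; the fields the loop consumes suggest roughly the following entry shape, reconstructed here purely from that usage (the real struct in the tarexport package may differ or carry extra fields):
package tarexport // assumed; matches the *tarexporter receiver above

import "github.com/docker/docker/image"

// manifestItem, approximately as inferred from the fields Load reads above.
type manifestItem struct {
	Config   string   // path of the image config JSON inside the archive
	RepoTags []string // references to tag once the image has been created
	Layers   []string // layer tar paths, ordered to match RootFS.DiffIDs
	Parent   image.ID // optional parent image ID, wired up via parentLinks
}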
Example #8
0
// CmdLoad loads a set of images into the repository. This is the complement of ImageExport.
// The input stream is an uncompressed tarball containing images and metadata.
func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tmpImageDir)

	var (
		repoTarFile = path.Join(tmpImageDir, "repo.tar")
		repoDir     = path.Join(tmpImageDir, "repo")
	)

	tarFile, err := os.Create(repoTarFile)
	if err != nil {
		return job.Error(err)
	}
	if _, err := io.Copy(tarFile, job.Stdin); err != nil {
		return job.Error(err)
	}
	tarFile.Close()

	repoFile, err := os.Open(repoTarFile)
	if err != nil {
		return job.Error(err)
	}
	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
		return job.Error(err)
	}
	images, err := s.graph.Map()
	if err != nil {
		return job.Error(err)
	}
	excludes := make([]string, len(images))
	i := 0
	for k := range images {
		excludes[i] = k
		i++
	}
	if err := chrootarchive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
		return job.Error(err)
	}

	dirs, err := ioutil.ReadDir(repoDir)
	if err != nil {
		return job.Error(err)
	}

	for _, d := range dirs {
		if d.IsDir() {
			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
				return job.Error(err)
			}
		}
	}

	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
	if err == nil {
		repositories := map[string]Repository{}
		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
			return job.Error(err)
		}

		for imageName, tagMap := range repositories {
			for tag, address := range tagMap {
				if err := s.Set(imageName, tag, address, true); err != nil {
					return job.Error(err)
				}
			}
		}
	} else if !os.IsNotExist(err) {
		return job.Error(err)
	}

	return engine.StatusOK
}
Example #9
0
// ExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
	container.Lock()
	defer container.Unlock()

	if err = container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	err = container.mountVolumes()
	defer container.unmountVolumes(true)
	if err != nil {
		return err
	}

	// The destination path needs to be resolved to a host path, with all
	// symbolic links followed in the scope of the container's rootfs. Note
	// that we do not use `container.resolvePath(path)` here because we need
	// to also evaluate the last path element if it is a symlink. This is so
	// that you can extract an archive to a symlink that points to a directory.

	// Consider the given path as an absolute path in the container.
	absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)

	// This will evaluate the last path element if it is a symlink.
	resolvedPath, err := container.GetResourcePath(absPath)
	if err != nil {
		return err
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return err
	}

	if !stat.IsDir() {
		return ErrExtractPointNotDirectory
	}

	// Need to check if the path is in a volume. If it is, it cannot be in a
	// read-only volume. If it is not in a volume, the container cannot be
	// configured with a read-only rootfs.

	// Use the resolved path relative to the container rootfs as the new
	// absPath. This way we fully follow any symlinks in a volume that may
	// lead back outside the volume.
	//
	// The Windows implementation of filepath.Rel in golang 1.4 does not
	// support volume style file path semantics. On Windows when using the
	// filter driver, we are guaranteed that the path will always be
	// a volume file path.
	var baseRel string
	if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
		if strings.HasPrefix(resolvedPath, container.basefs) {
			baseRel = resolvedPath[len(container.basefs):]
			if baseRel[:1] == `\` {
				baseRel = baseRel[1:]
			}
		}
	} else {
		baseRel, err = filepath.Rel(container.basefs, resolvedPath)
	}
	if err != nil {
		return err
	}
	// Make it an absolute path.
	absPath = filepath.Join(string(filepath.Separator), baseRel)

	toVolume, err := checkIfPathIsInAVolume(container, absPath)
	if err != nil {
		return err
	}

	if !toVolume && container.hostConfig.ReadonlyRootfs {
		return ErrRootFSReadOnly
	}

	options := &archive.TarOptions{
		ChownOpts: &archive.TarChownOptions{
			UID: 0, GID: 0, // TODO: use config.User? Remap to userns root?
		},
		NoOverwriteDirNonDir: noOverwriteDirNonDir,
	}

	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
		return err
	}

	container.logEvent("extract-to-dir")

	return nil
}
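A small caller-side sketch of the contract spelled out in the doc comment, assumed to run in the same package as Container; the helper and its names are hypothetical.
package daemon // assumed package of Container in this era

import (
	"fmt"
	"io"
)

// extractArchiveInto is a hypothetical wrapper showing how a caller is meant
// to react to the ErrExtractPointNotDirectory contract described above.
func extractArchiveInto(c *Container, dst string, content io.Reader) error {
	err := c.ExtractToDir(dst, true /* noOverwriteDirNonDir */, content)
	switch err {
	case nil:
		return nil
	case ErrExtractPointNotDirectory:
		return fmt.Errorf("%q is not a directory inside the container", dst)
	default:
		return err
	}
}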
Example #10
0
func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer) error {
	tmpDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpDir)

	if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil {
		return err
	}
	// Read the manifest; if the file does not exist, fall back to legacy loading.
	manifestPath, err := safePath(tmpDir, manifestFileName)
	if err != nil {
		return err
	}
	manifestFile, err := os.Open(manifestPath)
	if err != nil {
		if os.IsNotExist(err) {
			return l.legacyLoad(tmpDir, outStream)
		}
		return err
	}
	defer manifestFile.Close()

	var manifest []manifestItem
	if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil {
		return err
	}

	for _, m := range manifest {
		configPath, err := safePath(tmpDir, m.Config)
		if err != nil {
			return err
		}
		config, err := ioutil.ReadFile(configPath)
		if err != nil {
			return err
		}
		img, err := image.NewFromJSON(config)
		if err != nil {
			return err
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = nil

		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
			return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual)
		}

		for i, diffID := range img.RootFS.DiffIDs {
			layerPath, err := safePath(tmpDir, m.Layers[i])
			if err != nil {
				return err
			}
			newLayer, err := l.loadLayer(layerPath, rootFS)
			if err != nil {
				return err
			}
			defer layer.ReleaseAndLog(l.ls, newLayer)
			if expected, actual := diffID, newLayer.DiffID(); expected != actual {
				return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual)
			}
			rootFS.Append(diffID)
		}

		imgID, err := l.is.Create(config)
		if err != nil {
			return err
		}

		for _, repoTag := range m.RepoTags {
			named, err := reference.ParseNamed(repoTag)
			if err != nil {
				return err
			}
			ref, ok := named.(reference.NamedTagged)
			if !ok {
				return fmt.Errorf("invalid tag %q", repoTag)
			}
			l.setLoadedTag(ref, imgID, outStream)
		}

	}

	return nil
}
Example #11
0
func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
	return chrootarchive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
}
Example #12
0
// CmdLoad loads a set of images into the repository. This is the complement of ImageExport.
// The input stream is an uncompressed tarball containing images and metadata.
func (s *TagStore) CmdLoad(job *engine.Job) error {
	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpImageDir)

	var (
		repoDir = path.Join(tmpImageDir, "repo")
	)

	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
		return err
	}
	images, err := s.graph.Map()
	if err != nil {
		return err
	}
	excludes := make([]string, len(images))
	i := 0
	for k := range images {
		excludes[i] = k
		i++
	}
	if err := chrootarchive.Untar(job.Stdin, repoDir, &archive.TarOptions{ExcludePatterns: excludes}); err != nil {
		return err
	}

	dirs, err := ioutil.ReadDir(repoDir)
	if err != nil {
		return err
	}

	for _, d := range dirs {
		if d.IsDir() {
			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
				return err
			}
		}
	}

	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
	if err == nil {
		repositories := map[string]Repository{}
		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
			return err
		}

		for imageName, tagMap := range repositories {
			for tag, address := range tagMap {
				if err := s.SetLoad(imageName, tag, address, true, job.Stdout); err != nil {
					return err
				}
			}
		}
	} else if !os.IsNotExist(err) {
		return err
	}

	return nil
}
Example #13
0
// ExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (container *Container) ExtractToDir(path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
	container.Lock()
	defer container.Unlock()

	if err = container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	err = container.mountVolumes()
	defer container.UnmountVolumes(true)
	if err != nil {
		return err
	}

	// Consider the given path as an absolute path in the container.
	absPath := path
	if !filepath.IsAbs(absPath) {
		absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join("/", path), path)
	}

	resolvedPath, err := container.GetResourcePath(absPath)
	if err != nil {
		return err
	}

	// A trailing "." or separator has important meaning. For example, if
	// `"foo"` is a symlink to some directory `"dir"`, then `os.Lstat("foo")`
	// will stat the link itself, while `os.Lstat("foo/")` will stat the link
	// target. If the basename of the path is ".", it means to archive the
	// contents of the directory with "." as the first path component rather
	// than the name of the directory. This would cause extraction of the
	// archive to *not* make another directory, but instead use the current
	// directory.
	resolvedPath = archive.PreserveTrailingDotOrSeparator(resolvedPath, absPath)

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return err
	}

	if !stat.IsDir() {
		return ErrExtractPointNotDirectory
	}

	baseRel, err := filepath.Rel(container.basefs, resolvedPath)
	if err != nil {
		return err
	}
	absPath = filepath.Join("/", baseRel)

	// Need to check if the path is in a volume. If it is, it cannot be in a
	// read-only volume. If it is not in a volume, the container cannot be
	// configured with a read-only rootfs.
	var toVolume bool
	for _, mnt := range container.MountPoints {
		if toVolume = mnt.hasResource(absPath); toVolume {
			if mnt.RW {
				break
			}
			return ErrVolumeReadonly
		}
	}

	if !toVolume && container.hostConfig.ReadonlyRootfs {
		return ErrContainerRootfsReadonly
	}

	options := &archive.TarOptions{
		ChownOpts: &archive.TarChownOptions{
			UID: 0, GID: 0, // TODO: use config.User? Remap to userns root?
		},
		NoOverwriteDirNonDir: noOverwriteDirNonDir,
	}

	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
		return err
	}

	container.LogEvent("extract-to-dir")

	return nil
}
Example #14
0
// CreateFromContext creates a plugin from the given pluginDir which contains
// both the rootfs and the config.json and a repoName with optional tag.
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	ref, err := reference.ParseNamed(options.RepoName)
	if err != nil {
		return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
	}
	if _, ok := ref.(reference.Canonical); ok {
		return errors.Errorf("canonical references are not permitted")
	}
	name := reference.WithDefaultTag(ref).String()

	if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
		return err
	}

	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(err, "failed to create temp directory")
	}
	defer os.RemoveAll(tmpRootFSDir)
	var configJSON []byte
	rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)

	rootFSBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer rootFSBlob.Close()
	gzw := gzip.NewWriter(rootFSBlob)
	layerDigester := digest.Canonical.Digester()
	rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))

	if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
		return err
	}
	if err := rootFS.Close(); err != nil {
		return err
	}

	if configJSON == nil {
		return errors.New("config not found")
	}

	if err := gzw.Close(); err != nil {
		return errors.Wrap(err, "error closing gzip writer")
	}

	var config types.PluginConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.Wrap(err, "failed to parse config")
	}

	if err := pm.validateConfig(config); err != nil {
		return err
	}

	pm.mu.Lock()
	defer pm.mu.Unlock()

	rootFSBlobsum, err := rootFSBlob.Commit()
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			go pm.GC()
		}
	}()

	config.Rootfs = &types.PluginConfigRootfs{
		Type:    "layers",
		DiffIds: []string{layerDigester.Digest().String()},
	}

	configBlob, err := pm.blobStore.New()
	if err != nil {
		return err
	}
	defer configBlob.Close()
	if err := json.NewEncoder(configBlob).Encode(config); err != nil {
		return errors.Wrap(err, "error encoding json config")
	}
	configBlobsum, err := configBlob.Commit()
	if err != nil {
		return err
	}

	p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
	if err != nil {
		return err
	}

	pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")

	return nil
}
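CreateFromContext extracts the rootfs while, in the same pass, compressing it into the blob store and digesting the uncompressed bytes, by tee-ing the stream through io.MultiWriter. The plumbing in isolation (the input file and the temp-file stand-in for pm.blobStore.New() are placeholders):
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/opencontainers/go-digest"
)

func main() {
	rootFS, err := os.Open("rootfs.tar") // placeholder input
	if err != nil {
		panic(err)
	}
	defer rootFS.Close()

	blob, err := ioutil.TempFile("", "layer-") // stands in for pm.blobStore.New()
	if err != nil {
		panic(err)
	}
	defer blob.Close()

	gzw := gzip.NewWriter(blob)
	digester := digest.Canonical.Digester()

	// Every byte read from rootFS is also written to the gzip blob and fed to
	// the digester, so extraction, compression, and DiffID computation all
	// happen in a single pass over the stream.
	tee := io.TeeReader(rootFS, io.MultiWriter(gzw, digester.Hash()))

	// In the real code this reader is handed to chrootarchive.Untar; here we
	// simply drain it to demonstrate the plumbing.
	if _, err := io.Copy(ioutil.Discard, tee); err != nil {
		panic(err)
	}
	if err := gzw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("uncompressed digest:", digester.Digest())
}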
Example #15
0
// containerExtractToDir extracts the given tar archive to the specified location in the
// filesystem of this container. The given path must be of a directory in the
// container. If it is not, the error will be ErrExtractPointNotDirectory. If
// noOverwriteDirNonDir is true then it will be an error if unpacking the
// given content would cause an existing directory to be replaced with a non-
// directory and vice versa.
func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) {
	container.Lock()
	defer container.Unlock()

	if err = daemon.Mount(container); err != nil {
		return err
	}
	defer daemon.Unmount(container)

	err = daemon.mountVolumes(container)
	defer container.DetachAndUnmount(daemon.LogVolumeEvent)
	if err != nil {
		return err
	}

	// If a drive letter is supplied, it must be the system drive. This is a no-op except on Windows.
	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
	if err != nil {
		return err
	}

	// The destination path needs to be resolved to a host path, with all
	// symbolic links followed in the scope of the container's rootfs. Note
	// that we do not use `container.ResolvePath(path)` here because we need
	// to also evaluate the last path element if it is a symlink. This is so
	// that you can extract an archive to a symlink that points to a directory.

	// Consider the given path as an absolute path in the container.
	absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)

	// This will evaluate the last path element if it is a symlink.
	resolvedPath, err := container.GetResourcePath(absPath)
	if err != nil {
		return err
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return err
	}

	if !stat.IsDir() {
		return ErrExtractPointNotDirectory
	}

	// Need to check if the path is in a volume. If it is, it cannot be in a
	// read-only volume. If it is not in a volume, the container cannot be
	// configured with a read-only rootfs.

	// Use the resolved path relative to the container rootfs as the new
	// absPath. This way we fully follow any symlinks in a volume that may
	// lead back outside the volume.
	//
	// The Windows implementation of filepath.Rel in golang 1.4 does not
	// support volume style file path semantics. On Windows when using the
	// filter driver, we are guaranteed that the path will always be
	// a volume file path.
	var baseRel string
	if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
		if strings.HasPrefix(resolvedPath, container.BaseFS) {
			baseRel = resolvedPath[len(container.BaseFS):]
			if baseRel[:1] == `\` {
				baseRel = baseRel[1:]
			}
		}
	} else {
		baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
	}
	if err != nil {
		return err
	}
	// Make it an absolute path.
	absPath = filepath.Join(string(filepath.Separator), baseRel)

	toVolume, err := checkIfPathIsInAVolume(container, absPath)
	if err != nil {
		return err
	}

	if !toVolume && container.HostConfig.ReadonlyRootfs {
		return ErrRootFSReadOnly
	}

	uid, gid := daemon.GetRemappedUIDGID()
	options := &archive.TarOptions{
		NoOverwriteDirNonDir: noOverwriteDirNonDir,
		ChownOpts: &archive.TarChownOptions{
			UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)?
		},
	}
	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
		return err
	}

	daemon.LogContainerEvent(container, "extract-to-dir")

	return nil
}