func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) {
	img, err := s.is.Get(id)
	if err != nil {
		return nil, err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return nil, fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	var foreignSrcs map[layer.DiffID]distribution.Descriptor
	for i := range img.RootFS.DiffIDs {
		v1Img := image.V1Image{
			Created: img.Created,
		}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return nil, err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created)
		if err != nil {
			return nil, err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
		if src.Digest != "" {
			if foreignSrcs == nil {
				foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
			}
			foreignSrcs[img.RootFS.DiffIDs[i]] = src
		}
	}

	configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return nil, err
	}
	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
		return nil, err
	}

	s.images[id].layers = layers
	return foreignSrcs, nil
}
func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) error {
	if _, exists := s.savedLayers[legacyImg.ID]; exists {
		return nil
	}

	outDir := filepath.Join(s.outDir, legacyImg.ID)
	if err := os.Mkdir(outDir, 0755); err != nil {
		return err
	}

	// todo: why is this version file here?
	if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
		return err
	}

	imageConfig, err := json.Marshal(legacyImg)
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
		return err
	}

	// serialize filesystem
	tarFile, err := os.Create(filepath.Join(outDir, legacyLayerFileName))
	if err != nil {
		return err
	}
	defer tarFile.Close()

	l, err := s.ls.Get(id)
	if err != nil {
		return err
	}
	defer layer.ReleaseAndLog(s.ls, l)

	arch, err := l.TarStream()
	if err != nil {
		return err
	}
	defer arch.Close()

	if _, err := io.Copy(tarFile, arch); err != nil {
		return err
	}

	for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
		// todo: maybe save layer created timestamp?
		if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
			return err
		}
	}

	s.savedLayers[legacyImg.ID] = struct{}{}
	return nil
}
func createSampleDir(t *testing.T, root string) {
	files := []FileData{
		{Regular, "file1", "file1\n", 0600},
		{Regular, "file2", "file2\n", 0666},
		{Regular, "file3", "file3\n", 0404},
		{Regular, "file4", "file4\n", 0600},
		{Regular, "file5", "file5\n", 0600},
		{Regular, "file6", "file6\n", 0600},
		{Regular, "file7", "file7\n", 0600},
		{Dir, "dir1", "", 0740},
		{Regular, "dir1/file1-1", "file1-1\n", 01444},
		{Regular, "dir1/file1-2", "file1-2\n", 0666},
		{Dir, "dir2", "", 0700},
		{Regular, "dir2/file2-1", "file2-1\n", 0666},
		{Regular, "dir2/file2-2", "file2-2\n", 0666},
		{Dir, "dir3", "", 0700},
		{Regular, "dir3/file3-1", "file3-1\n", 0666},
		{Regular, "dir3/file3-2", "file3-2\n", 0666},
		{Dir, "dir4", "", 0700},
		{Regular, "dir4/file3-1", "file4-1\n", 0666},
		{Regular, "dir4/file3-2", "file4-2\n", 0666},
		{Symlink, "symlink1", "target1", 0666},
		{Symlink, "symlink2", "target2", 0666},
		{Symlink, "symlink3", root + "/file1", 0666},
		{Symlink, "symlink4", root + "/symlink3", 0666},
		{Symlink, "dirSymlink", root + "/dir1", 0740},
	}

	now := time.Now()
	for _, info := range files {
		p := path.Join(root, info.path)
		if info.filetype == Dir {
			if err := os.MkdirAll(p, info.permissions); err != nil {
				t.Fatal(err)
			}
		} else if info.filetype == Regular {
			if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil {
				t.Fatal(err)
			}
		} else if info.filetype == Symlink {
			if err := os.Symlink(info.contents, p); err != nil {
				t.Fatal(err)
			}
		}

		if info.filetype != Symlink {
			// Set a consistent atime and mtime for all files and dirs
			if err := system.Chtimes(p, now, now); err != nil {
				t.Fatal(err)
			}
		}
	}
}
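This test helper leans on a few declarations that are not shown here. A minimal sketch of what they might look like, inferred purely from the usage above; the constant names (Regular, Dir, Symlink) and FileData come from the calls, while the field names and exact types are assumptions rather than the real test file's declarations:

// Hypothetical declarations backing createSampleDir; inferred from usage,
// not copied from the actual changes_test.go.
type FileType uint32

const (
	Regular FileType = iota
	Dir
	Symlink
)

type FileData struct {
	filetype    FileType
	path        string
	contents    string
	permissions os.FileMode
}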
func (s *saveSession) saveImage(id image.ID) error {
	img, err := s.is.Get(id)
	if err != nil {
		return err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	for i := range img.RootFS.DiffIDs {
		v1Img := image.V1Image{}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil {
			return err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
	}

	configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return err
	}
	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
		return err
	}

	s.images[id].layers = layers
	return nil
}
func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) {
	if _, exists := s.savedLayers[legacyImg.ID]; exists {
		return distribution.Descriptor{}, nil
	}

	outDir := filepath.Join(s.outDir, legacyImg.ID)
	if err := os.Mkdir(outDir, 0755); err != nil {
		return distribution.Descriptor{}, err
	}

	// todo: why is this version file here?
	if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil {
		return distribution.Descriptor{}, err
	}

	imageConfig, err := json.Marshal(legacyImg)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil {
		return distribution.Descriptor{}, err
	}

	// serialize filesystem
	layerPath := filepath.Join(outDir, legacyLayerFileName)
	l, err := s.ls.Get(id)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	defer layer.ReleaseAndLog(s.ls, l)

	if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists {
		relPath, err := filepath.Rel(outDir, oldPath)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		// reuse the layer content already written for this DiffID by linking to it
		if err := os.Symlink(relPath, layerPath); err != nil {
			return distribution.Descriptor{}, err
		}
	} else {
		tarFile, err := os.Create(layerPath)
		if err != nil {
			return distribution.Descriptor{}, err
		}
		defer tarFile.Close()

		arch, err := l.TarStream()
		if err != nil {
			return distribution.Descriptor{}, err
		}
		defer arch.Close()

		if _, err := io.Copy(tarFile, arch); err != nil {
			return distribution.Descriptor{}, err
		}

		for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} {
			// todo: maybe save layer created timestamp?
			if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil {
				return distribution.Descriptor{}, err
			}
		}

		s.diffIDPaths[l.DiffID()] = layerPath
	}
	s.savedLayers[legacyImg.ID] = struct{}{}

	var src distribution.Descriptor
	if fs, ok := l.(distribution.Describable); ok {
		src = fs.Descriptor()
	}
	return src, nil
}
func (s *saveSession) save(outStream io.Writer) error {
	s.savedLayers = make(map[string]struct{})
	s.diffIDPaths = make(map[layer.DiffID]string)

	// get image json
	tempDir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	s.outDir = tempDir
	reposLegacy := make(map[string]map[string]string)

	var manifest []manifestItem
	var parentLinks []parentLink

	for id, imageDescr := range s.images {
		foreignSrcs, err := s.saveImage(id)
		if err != nil {
			return err
		}

		var repoTags []string
		var layers []string

		for _, ref := range imageDescr.refs {
			if _, ok := reposLegacy[ref.Name()]; !ok {
				reposLegacy[ref.Name()] = make(map[string]string)
			}
			reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1]
			repoTags = append(repoTags, ref.String())
		}

		for _, l := range imageDescr.layers {
			layers = append(layers, filepath.Join(l, legacyLayerFileName))
		}

		manifest = append(manifest, manifestItem{
			Config:       digest.Digest(id).Hex() + ".json",
			RepoTags:     repoTags,
			Layers:       layers,
			LayerSources: foreignSrcs,
		})

		parentID, _ := s.is.GetParent(id)
		parentLinks = append(parentLinks, parentLink{id, parentID})
		s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save")
	}

	for i, p := range validatedParentLinks(parentLinks) {
		if p.parentID != "" {
			manifest[i].Parent = p.parentID
		}
	}

	if len(reposLegacy) > 0 {
		reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
		f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			f.Close()
			return err
		}
		if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
		if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return err
		}
	}

	manifestFileName := filepath.Join(tempDir, manifestFileName)
	f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		f.Close()
		return err
	}
	if err := json.NewEncoder(f).Encode(manifest); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
		return err
	}

	fs, err := archive.Tar(tempDir, archive.Uncompressed)
	if err != nil {
		return err
	}
	defer fs.Close()

	if _, err := io.Copy(outStream, fs); err != nil {
		return err
	}
	return nil
}
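The save, saveImage, and saveLayer methods above hang off a saveSession value whose declaration is not included here. A rough sketch of the fields they appear to rely on, reconstructed only from the usage in these methods; the field names, the imageDescriptor type name, and how the real exporter embeds its stores are assumptions:

// Hypothetical shape of saveSession, inferred from the methods above.
type saveSession struct {
	is          image.Store                   // image store (s.is.Get, s.is.GetParent)
	ls          layer.Store                   // layer store (s.ls.Get)
	outDir      string                        // temp dir the archive is assembled in
	images      map[image.ID]*imageDescriptor // images to save, with refs and written layer IDs
	savedLayers map[string]struct{}           // legacy layer IDs already written
	diffIDPaths map[layer.DiffID]string       // layer tar paths already on disk, keyed by DiffID
	// the real type also exposes s.tarexporter.loggerImgEvent for "save" events
}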
func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) {
	// get filename from URL
	u, err := url.Parse(srcURL)
	if err != nil {
		return
	}
	path := filepath.FromSlash(u.Path) // Ensure in platform semantics
	if strings.HasSuffix(path, string(os.PathSeparator)) {
		path = path[:len(path)-1]
	}
	parts := strings.Split(path, string(os.PathSeparator))
	filename := parts[len(parts)-1]
	if filename == "" {
		err = fmt.Errorf("cannot determine filename from url: %s", u)
		return
	}

	// Initiate the download
	resp, err := httputils.Download(srcURL)
	if err != nil {
		return
	}

	// Prepare file in a tmp dir
	tmpDir, err := ioutils.TempDir("", "docker-remote")
	if err != nil {
		return
	}
	defer func() {
		if err != nil {
			os.RemoveAll(tmpDir)
		}
	}()
	tmpFileName := filepath.Join(tmpDir, filename)
	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
	if err != nil {
		return
	}

	stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter)
	progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true)
	progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading")
	// Download and dump result to tmp file
	if _, err = io.Copy(tmpFile, progressReader); err != nil {
		tmpFile.Close()
		return
	}
	fmt.Fprintln(b.Stdout)
	// ignoring error because the file was already opened successfully
	tmpFileSt, err := tmpFile.Stat()
	if err != nil {
		return
	}
	tmpFile.Close()

	// Set the mtime to the Last-Modified header value if present
	// Otherwise just remove atime and mtime
	mTime := time.Time{}

	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		// If we can't parse it then just let it default to 'zero'
		// otherwise use the parsed time value
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
		return
	}

	// Calc the checksum, even if we're using the cache
	r, err := archive.Tar(tmpFileName, archive.Uncompressed)
	if err != nil {
		return
	}
	tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
	if err != nil {
		return
	}
	if _, err = io.Copy(ioutil.Discard, tarSum); err != nil {
		return
	}
	hash := tarSum.Sum(nil)
	r.Close()
	return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil
}
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return err
	}
	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		// if the options contain a uid & gid maps, convert header uid/gid
		// entries using the maps such that lchown sets the proper mapped
		// uid/gid after writing the file. We only perform this mapping if
		// the file isn't already owned by the remapped root UID or GID, as
		// that specific uid/gid has no mapping from container -> host, and
		// those files already have the proper ownership for inside the
		// container.
		if hdr.Uid != remappedRootUID {
			xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
			if err != nil {
				return err
			}
			hdr.Uid = xUID
		}
		if hdr.Gid != remappedRootGID {
			xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
			if err != nil {
				return err
			}
			hdr.Gid = xGID
		}

		if whiteoutConverter != nil {
			writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
			if err != nil {
				return err
			}
			if !writeFile {
				continue
			}
		}

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error {
	// hdr.Mode is in linux format, which we can use for syscalls,
	// but for os.Foo() calls we need the mode converted to os.FileMode,
	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
	hdrInfo := hdr.FileInfo()

	switch hdr.Typeflag {
	case tar.TypeDir:
		// Create directory unless it exists as a directory already.
		// In that case we just want to merge the two
		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
			if err := os.Mkdir(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}

	case tar.TypeReg, tar.TypeRegA:
		// Source is regular file
		file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
		if err != nil {
			return err
		}
		if _, err := io.Copy(file, reader); err != nil {
			file.Close()
			return err
		}
		file.Close()

	case tar.TypeBlock, tar.TypeChar:
		if inUserns { // cannot create devices in a userns
			return nil
		}
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeFifo:
		// Handle this in an OS-specific way
		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
			return err
		}

	case tar.TypeLink:
		targetPath := filepath.Join(extractDir, hdr.Linkname)
		// check for hardlink breakout
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
		}
		if err := os.Link(targetPath, path); err != nil {
			return err
		}

	case tar.TypeSymlink:
		// path -> hdr.Linkname = targetPath
		// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
		targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)

		// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
		// that symlink would first have to be created, which would be caught earlier, at this very check:
		if !strings.HasPrefix(targetPath, extractDir) {
			return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, path); err != nil {
			return err
		}

	case tar.TypeXGlobalHeader:
		logrus.Debug("PAX Global Extended Headers found and ignored")
		return nil

	default:
		return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag)
	}

	// Lchown is not supported on Windows.
	if Lchown && runtime.GOOS != "windows" {
		if chownOpts == nil {
			chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
		}
		if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
			return err
		}
	}

	var errors []string
	for key, value := range hdr.Xattrs {
		if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
			if err == syscall.ENOTSUP {
				// We ignore errors here because not all graphdrivers support
				// xattrs *cough* old versions of AUFS *cough*. However only
				// ENOTSUP should be emitted in that case, otherwise we still
				// bail.
				errors = append(errors, err.Error())
				continue
			}
			return err
		}
	}

	if len(errors) > 0 {
		logrus.WithFields(logrus.Fields{
			"errors": errors,
		}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
	}

	// There is no LChmod, so ignore mode for symlink. Also, this
	// must happen after chown, as that can modify the file mode
	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
		return err
	}

	aTime := hdr.AccessTime
	if aTime.Before(hdr.ModTime) {
		// Last access time should never be before last modified time.
		aTime = hdr.ModTime
	}

	// system.Chtimes doesn't support a NOFOLLOW flag atm
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil {
			return err
		}
	} else {
		ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)}
		if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
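All of these code paths ultimately set timestamps through system.Chtimes(path, atime, mtime), which is the only part of its contract visible in these call sites. A minimal usage sketch of that convention, mirroring what the URL-download code does when a Last-Modified header is or isn't present; touchDownloadedFile is a hypothetical helper, not part of the real packages:

// Hypothetical helper illustrating the Chtimes call pattern used above:
// parse Last-Modified if possible, otherwise fall back to the zero time,
// which the download code treats as "remove atime and mtime".
func touchDownloadedFile(path, lastMod string) error {
	mTime := time.Time{} // zero value when no usable Last-Modified header
	if lastMod != "" {
		if parsed, err := http.ParseTime(lastMod); err == nil {
			mTime = parsed
		}
	}
	return system.Chtimes(path, mTime, mTime)
}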
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return 0, err
	}

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600)
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			// if the options contain a uid & gid maps, convert header uid/gid
			// entries using the maps such that lchown sets the proper mapped
			// uid/gid after writing the file. We only perform this mapping if
			// the file isn't already owned by the remapped root UID or GID, as
			// that specific uid/gid has no mapping from container -> host, and
			// those files already have the proper ownership for inside the
			// container.
			if srcHdr.Uid != remappedRootUID {
				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Uid = xUID
			}
			if srcHdr.Gid != remappedRootGID {
				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Gid = xGID
			}
			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}
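UnpackLayer leans on whiteout-naming constants (WhiteoutPrefix, WhiteoutMetaPrefix, WhiteoutLinkDir, WhiteoutOpaqueDir) defined elsewhere in the archive package. For orientation, a sketch of the AUFS-style values these names conventionally carry; the values are assumptions based on the AUFS whiteout scheme, not quoted from this file:

// Assumed AUFS-style whiteout names referenced by UnpackLayer.
const (
	WhiteoutPrefix     = ".wh."                          // ".wh.foo" deletes "foo" from the lower layer
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix // ".wh..wh." marks AUFS metadata entries
	WhiteoutLinkDir    = WhiteoutMetaPrefix + "plnk"     // hardlink targets live under ".wh..wh.plnk"
	WhiteoutOpaqueDir  = WhiteoutMetaPrefix + ".opq"     // marks a directory as opaque, hiding lower contents
)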
func copyDir(srcDir, dstDir string, flags copyFlags) error {
	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(srcDir, srcPath)
		if err != nil {
			return err
		}

		dstPath := filepath.Join(dstDir, relPath)
		if err != nil {
			return err
		}

		stat, ok := f.Sys().(*syscall.Stat_t)
		if !ok {
			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
		}

		isHardlink := false

		switch f.Mode() & os.ModeType {
		case 0: // Regular file
			if flags&copyHardlink != 0 {
				isHardlink = true
				if err := os.Link(srcPath, dstPath); err != nil {
					return err
				}
			} else {
				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
					return err
				}
			}

		case os.ModeDir:
			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
				return err
			}

		case os.ModeSymlink:
			link, err := os.Readlink(srcPath)
			if err != nil {
				return err
			}

			if err := os.Symlink(link, dstPath); err != nil {
				return err
			}

		case os.ModeNamedPipe:
			fallthrough
		case os.ModeSocket:
			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
				return err
			}

		case os.ModeDevice:
			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
				return err
			}

		default:
			return fmt.Errorf("Unknown file type for %s\n", srcPath)
		}

		// Everything below is copying metadata from src to dst. All this metadata
		// already shares an inode for hardlinks.
		if isHardlink {
			return nil
		}

		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
			return err
		}

		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
			return err
		}

		// We need to copy this attribute if it appears in an overlay upper layer, as
		// this function is used to copy those. It is set by overlay if a directory
		// is removed and then re-created and should not inherit anything from the
		// same dir in the lower dir.
		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
			return err
		}

		isSymlink := f.Mode()&os.ModeSymlink != 0

		// There is no LChmod, so ignore mode for symlink. Also, this
		// must happen after chown, as that can modify the file mode
		if !isSymlink {
			if err := os.Chmod(dstPath, f.Mode()); err != nil {
				return err
			}
		}

		// system.Chtimes doesn't support a NOFOLLOW flag atm
		if !isSymlink {
			aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
			mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
				return err
			}
		} else {
			ts := []syscall.Timespec{stat.Atim, stat.Mtim}
			if err := system.LUtimesNano(dstPath, ts); err != nil {
				return err
			}
		}
		return nil
	})
	return err
}
func calcCopyInfo(b *builder, cmdName string, cInfos *[]*copyInfo, origPath string, destPath string, allowRemote bool, allowDecompression bool, allowWildcards bool) error {

	// Work in daemon-specific OS filepath semantics. However, we save
	// the origPath passed in here, as it might also be a URL which
	// we need to check for in this function.
	passedInOrigPath := origPath
	origPath = filepath.FromSlash(origPath)
	destPath = filepath.FromSlash(destPath)

	if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 {
		origPath = origPath[1:]
	}
	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))

	// Twiddle the destPath when it's a relative path - meaning, make it
	// relative to the WORKINGDIR
	if !system.IsAbs(destPath) {
		hasSlash := strings.HasSuffix(destPath, string(os.PathSeparator))
		destPath = filepath.Join(string(os.PathSeparator), filepath.FromSlash(b.Config.WorkingDir), destPath)

		// Make sure we preserve any trailing slash
		if hasSlash {
			destPath += string(os.PathSeparator)
		}
	}

	// In the remote/URL case, download it and gen its hashcode
	if urlutil.IsURL(passedInOrigPath) {

		// As it's a URL, we go back to processing on what was passed in
		// to this function
		origPath = passedInOrigPath

		if !allowRemote {
			return fmt.Errorf("Source can't be a URL for %s", cmdName)
		}

		ci := copyInfo{}
		ci.origPath = origPath
		ci.hash = origPath // default to this but can change
		ci.destPath = destPath
		ci.decompress = false
		*cInfos = append(*cInfos, &ci)

		// Initiate the download
		resp, err := httputils.Download(ci.origPath)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := getTempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}
		ci.tmpDir = tmpDirName

		// Create a tmp file within our tmp dir
		tmpFileName := filepath.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
			In:        resp.Body,
			Out:       b.OutOld,
			Formatter: b.StreamFormatter,
			Size:      resp.ContentLength,
			NewLines:  true,
			ID:        "",
			Action:    "Downloading",
		})); err != nil {
			tmpFile.Close()
			return err
		}
		fmt.Fprintf(b.OutStream, "\n")
		tmpFile.Close()

		// Set the mtime to the Last-Modified header value if present
		// Otherwise just remove atime and mtime
		mTime := time.Time{}

		lastMod := resp.Header.Get("Last-Modified")
		if lastMod != "" {
			// If we can't parse it then just let it default to 'zero'
			// otherwise use the parsed time value
			if parsedMTime, err := http.ParseTime(lastMod); err == nil {
				mTime = parsedMTime
			}
		}

		if err := system.Chtimes(tmpFileName, time.Time{}, mTime); err != nil {
			return err
		}

		ci.origPath = filepath.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(ci.destPath, string(os.PathSeparator)) {
			u, err := url.Parse(origPath)
			if err != nil {
				return err
			}
			path := filepath.FromSlash(u.Path) // Ensure in platform semantics
			if strings.HasSuffix(path, string(os.PathSeparator)) {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, string(os.PathSeparator))
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			ci.destPath = ci.destPath + filename
		}

		// Calc the checksum, even if we're using the cache
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1)
		if err != nil {
			return err
		}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		ci.hash = tarSum.Sum(nil)
		r.Close()

		return nil
	}

	// Deal with wildcards
	if allowWildcards && containsWildcards(origPath) {
		for _, fileInfo := range b.context.GetSums() {
			if fileInfo.Name() == "" {
				continue
			}
			match, _ := filepath.Match(origPath, fileInfo.Name())
			if !match {
				continue
			}

			// Note we set allowWildcards to false in case the name has
			// a * in it
			calcCopyInfo(b, cmdName, cInfos, fileInfo.Name(), destPath, allowRemote, allowDecompression, false)
		}
		return nil
	}

	// Must be a dir or a file
	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}
	fi, _ := os.Stat(filepath.Join(b.contextPath, origPath))

	ci := copyInfo{}
	ci.origPath = origPath
	ci.hash = origPath
	ci.destPath = destPath
	ci.decompress = allowDecompression
	*cInfos = append(*cInfos, &ci)

	// Deal with the single file case
	if !fi.IsDir() {
		// This will match first file in sums of the archive
		fis := b.context.GetSums().GetFile(ci.origPath)
		if fis != nil {
			ci.hash = "file:" + fis.Sum()
		}
		return nil
	}

	// Must be a dir
	var subfiles []string
	absOrigPath := filepath.Join(b.contextPath, ci.origPath)

	// Add a trailing / to make sure we only pick up nested files under
	// the dir and not sibling files of the dir that just happen to
	// start with the same chars
	if !strings.HasSuffix(absOrigPath, string(os.PathSeparator)) {
		absOrigPath += string(os.PathSeparator)
	}

	// Need path w/o slash too to find matching dir w/o trailing slash
	absOrigPathNoSlash := absOrigPath[:len(absOrigPath)-1]

	for _, fileInfo := range b.context.GetSums() {
		absFile := filepath.Join(b.contextPath, fileInfo.Name())
		// Any file in the context that starts with the given path will be
		// picked up and its hashcode used. However, we'll exclude the
		// root dir itself. We do this for a couple of reasons:
		// 1 - ADD/COPY will not copy the dir itself, just its children
		//     so there's no reason to include it in the hash calc
		// 2 - the metadata on the dir will change when any child file
		//     changes. This will lead to a miss in the cache check if that
		//     child file is in the .dockerignore list.
		if strings.HasPrefix(absFile, absOrigPath) && absFile != absOrigPathNoSlash {
			subfiles = append(subfiles, fileInfo.Sum())
		}
	}
	sort.Strings(subfiles)
	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(subfiles, ",")))
	ci.hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))

	return nil
}
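calcCopyInfo fills in copyInfo records whose struct declaration is not shown above. A plausible sketch of the fields the code touches, reconstructed from the assignments; the field names follow the usage, but the type itself is an assumption about the builder's internals:

// Hypothetical copyInfo fields, inferred from the assignments in calcCopyInfo.
type copyInfo struct {
	origPath   string // path inside the build context, or the downloaded tmp file
	destPath   string // destination in the container; relative paths are rebased on WORKDIR
	hash       string // cache key: "file:<sum>", "dir:<sha256>", or the raw URL/path
	decompress bool   // whether ADD should unpack archives at the destination
	tmpDir     string // tmp dir holding a downloaded URL source, if any
}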
// Unpack unpacks the decompressedArchive to dest with options.
func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error {
	tr := tar.NewReader(decompressedArchive)
	trBuf := pools.BufioReader32KPool.Get(nil)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header

	// Iterate through the files in the archive.
loop:
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		// This keeps "../" as-is, but normalizes "/../" to "/". Or Windows:
		// This keeps "..\" as-is, but normalizes "\..\" to "\".
		hdr.Name = filepath.Clean(hdr.Name)

		for _, exclude := range options.ExcludePatterns {
			if strings.HasPrefix(hdr.Name, exclude) {
				continue loop
			}
		}

		// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
		// the filepath format for the OS on which the daemon is running. Hence
		// the check for a slash-suffix MUST be done in an OS-agnostic way.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return err
		}
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}

		// If path exists we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing directory with a non-directory from the archive.
				return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest)
			}

			if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir {
				// If NoOverwriteDirNonDir is true then we cannot replace
				// an existing non-directory with a directory from the archive.
				return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest)
			}

			if fi.IsDir() && hdr.Name == "." {
				continue
			}

			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		trBuf.Reset(tr)

		if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return err
		}
	}
	return nil
}
func (s *saveSession) save(outStream io.Writer) error {
	s.savedLayers = make(map[string]struct{})

	// get image json
	tempDir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	s.outDir = tempDir
	reposLegacy := make(map[string]map[string]string)

	var manifest []manifestItem

	for id, imageDescr := range s.images {
		if err = s.saveImage(id); err != nil {
			return err
		}

		var repoTags []string
		var layers []string

		for _, ref := range imageDescr.refs {
			if _, ok := reposLegacy[ref.Name()]; !ok {
				reposLegacy[ref.Name()] = make(map[string]string)
			}
			reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1]
			repoTags = append(repoTags, ref.String())
		}

		for _, l := range imageDescr.layers {
			layers = append(layers, filepath.Join(l, legacyLayerFileName))
		}

		manifest = append(manifest, manifestItem{
			Config:   digest.Digest(id).Hex() + ".json",
			RepoTags: repoTags,
			Layers:   layers,
		})
	}

	if len(reposLegacy) > 0 {
		reposFile := filepath.Join(tempDir, legacyRepositoriesFileName)
		f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
		if err != nil {
			f.Close()
			return err
		}
		if err := json.NewEncoder(f).Encode(reposLegacy); err != nil {
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
		if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
			return err
		}
	}

	manifestFileName := filepath.Join(tempDir, manifestFileName)
	f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		f.Close()
		return err
	}
	if err := json.NewEncoder(f).Encode(manifest); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil {
		return err
	}

	fs, err := archive.Tar(tempDir, archive.Uncompressed)
	if err != nil {
		return err
	}
	defer fs.Close()

	if _, err := io.Copy(outStream, fs); err != nil {
		return err
	}
	return nil
}
func mutateSampleDir(t *testing.T, root string) {
	// Remove a regular file
	if err := os.RemoveAll(path.Join(root, "file1")); err != nil {
		t.Fatal(err)
	}

	// Remove a directory
	if err := os.RemoveAll(path.Join(root, "dir1")); err != nil {
		t.Fatal(err)
	}

	// Remove a symlink
	if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil {
		t.Fatal(err)
	}

	// Rewrite a file
	if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Replace a file
	if err := os.RemoveAll(path.Join(root, "file3")); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil {
		t.Fatal(err)
	}

	// Touch file
	if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
		t.Fatal(err)
	}

	// Replace file with dir
	if err := os.RemoveAll(path.Join(root, "file5")); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil {
		t.Fatal(err)
	}

	// Create new file
	if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Create new dir
	if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil {
		t.Fatal(err)
	}

	// Create a new symlink
	if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil {
		t.Fatal(err)
	}

	// Change a symlink
	if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil {
		t.Fatal(err)
	}
	if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil {
		t.Fatal(err)
	}

	// Replace dir with file
	if err := os.RemoveAll(path.Join(root, "dir2")); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil {
		t.Fatal(err)
	}

	// Touch dir
	if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil {
		t.Fatal(err)
	}
}
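For context, createSampleDir and mutateSampleDir are the kind of helpers a change-detection test pairs up: build a sample tree, snapshot it, mutate the copy, and diff the two. A rough sketch of how they might be driven, assuming a ChangesDirs-style comparison exists in the package and using copySampleDir as a stand-in for whatever recursive, time-preserving copy the real test relies on; both of those names are assumptions here:

// Hypothetical driver for the helpers above; ChangesDirs, copySampleDir, and
// the assertions are assumptions about the surrounding test package.
func TestSampleDirChanges(t *testing.T) {
	src, err := ioutil.TempDir("", "changes-src-")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(src)
	createSampleDir(t, src)

	dst := src + "-copy"
	// copySampleDir stands in for a recursive copy that preserves timestamps
	// (e.g. shelling out to `cp -a`).
	if err := copySampleDir(src, dst); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dst)

	mutateSampleDir(t, dst)

	changes, err := ChangesDirs(dst, src)
	if err != nil {
		t.Fatal(err)
	}
	if len(changes) == 0 {
		t.Fatal("expected the mutations to show up as changes")
	}
}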