// SetTar adds data to db from a tar stream decoded from `src`.
// Raw data is stored under the key prefix `_fs_data/`, and metadata in a
// separate key prefix (see metaPath).
func (db *DB) SetTar(src io.Reader) error {
	tr := tar.NewReader(src)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// End of archive: all entries processed.
			break
		}
		if err != nil {
			return err
		}
		fmt.Printf("[META] %s\n", hdr.Name)
		// Serialize the tar header itself so it can be reconstituted later
		// (see GetTar, which reads it back with a tar reader).
		metaBlob, err := headerReader(hdr)
		if err != nil {
			return err
		}
		fmt.Printf(" ---> storing metadata in %s\n", metaPath(hdr.Name))
		if err := db.SetStream(metaPath(hdr.Name), metaBlob); err != nil {
			// NOTE(review): a metadata store failure silently skips the entry
			// (and its data below) — confirm this best-effort behavior is
			// intentional rather than a missing `return err`.
			continue
		}
		// FIXME: git can carry symlinks as well
		if hdr.Typeflag == tar.TypeReg {
			// Only regular-file content is stored; tr is positioned at this
			// entry's body after Next().
			fmt.Printf("[DATA] %s %d bytes\n", hdr.Name, hdr.Size)
			if err := db.SetStream(path.Join("_fs_data", hdr.Name), tr); err != nil {
				// NOTE(review): data-store failures are also swallowed — verify.
				continue
			}
		}
	}
	return nil
}
// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things.
// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work.
func TestUntarUstarGnuConflict(t *testing.T) {
	const wantName = "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm"

	f, err := os.Open("testdata/broken.tar")
	if err != nil {
		t.Fatal(err)
	}

	// Scan every entry in the archive, looking for the long Ustar name.
	tr := tar.NewReader(f)
	found := false
	for !found {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		found = hdr.Name == wantName
	}
	if !found {
		t.Fatalf("%s not found in the archive", wantName)
	}
}
// GetTar generates a tar stream frmo the contents of db, and streams // it to `dst`. func (db *DB) GetTar(dst io.Writer) error { tw := tar.NewWriter(dst) defer tw.Close() // Walk the data tree return db.Walk(DataTree, func(name string, obj git.Object) error { fmt.Fprintf(os.Stderr, "Generating tar entry for '%s'...\n", name) metaBlob, err := db.Get(metaPath(name)) if err != nil { return err } tr := tar.NewReader(bytes.NewReader([]byte(metaBlob))) hdr, err := tr.Next() if err != nil { return err } // Write the reconstituted tar header+content if err := tw.WriteHeader(hdr); err != nil { return err } if blob, isBlob := obj.(*git.Blob); isBlob { fmt.Fprintf(os.Stderr, "--> writing %d bytes for blob %s\n", hdr.Size, hdr.Name) if _, err := tw.Write(blob.Contents()[:hdr.Size]); err != nil { return err } } return nil }) return nil }
func TestPostContainersCopy(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test.txt"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() var copyData engine.Env copyData.Set("Resource", "/test.txt") copyData.Set("HostPath", ".") jsonData := bytes.NewBuffer(nil) if err := copyData.Encode(jsonData); err != nil { t.Fatal(err) } req, err := http.NewRequest("POST", "/containers/"+containerID+"/copy", jsonData) if err != nil { t.Fatal(err) } req.Header.Add("Content-Type", "application/json") if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } found := false for tarReader := tar.NewReader(r.Body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test.txt" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the copied output") } }
func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { return true } r := tar.NewReader(bytes.NewBuffer(header)) _, err := r.Next() return err == nil }
func TestGetContainersExport(t *testing.T) { eng := NewTestEngine(t) defer mkRuntimeFromEngine(eng, t).Nuke() // Create a container and remove a file containerID := createTestContainer(eng, &runconfig.Config{ Image: unitTestImageID, Cmd: []string{"touch", "/test"}, }, t, ) containerRun(eng, containerID, t) r := httptest.NewRecorder() req, err := http.NewRequest("GET", "/containers/"+containerID+"/export", nil) if err != nil { t.Fatal(err) } if err := api.ServeRequest(eng, api.APIVERSION, r, req); err != nil { t.Fatal(err) } assertHttpNotError(r, t) if r.Code != http.StatusOK { t.Fatalf("%d OK expected, received %d\n", http.StatusOK, r.Code) } found := false for tarReader := tar.NewReader(r.Body); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } t.Fatal(err) } if h.Name == "test" { found = true break } } if !found { t.Fatalf("The created test file has not been found in the exported image") } }
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `path`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
		return fmt.Errorf("Empty archive")
	}
	// Transparently handle gzip/bzip2/xz on top of the raw tar stream.
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()
	tr := tar.NewReader(decompressedArchive)

	// Directory headers are collected here and their timestamps applied last.
	var dirs []*tar.Header

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}
		path := filepath.Join(dest, hdr.Name)

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}
		if err := createTarFile(path, dest, hdr, tr); err != nil {
			return err
		}

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	// Restore directory atime/mtime now that all children have been written,
	// so extraction itself cannot clobber the timestamps being applied.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`.
func ApplyLayer(dest string, layer ArchiveReader) error {
	// We need to be able to set any perms
	oldmask := syscall.Umask(0)
	defer syscall.Umask(oldmask)

	layer, err := DecompressStream(layer)
	if err != nil {
		return err
	}

	tr := tar.NewReader(layer)
	trBuf := bufio.NewReaderSize(nil, trBufSize)

	// Directory headers are collected and their timestamps applied last.
	var dirs []*tar.Header

	// AUFS hardlink support: regular files under .wh..wh.plnk are extracted
	// into a temp dir so later hardlink entries pointing at them can be
	// resolved even though the plnk directory itself is not extracted.
	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0600)
				if err != nil {
					return err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, ".wh..wh.") {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					// Lazily create the temp dir on first use; cleaned up
					// when ApplyLayer returns.
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
					return err
				}
			}
			continue
		}

		path := filepath.Join(dest, hdr.Name)
		base := filepath.Base(path)
		if strings.HasPrefix(base, ".wh.") {
			// A ".wh.<name>" whiteout entry means <name> was deleted in this
			// layer: remove the original path instead of extracting anything.
			originalBase := base[len(".wh."):]
			originalPath := filepath.Join(filepath.Dir(path), originalBase)
			if err := os.RemoveAll(originalPath); err != nil {
				return err
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return err
					}
				}
			}

			// By default, read the entry's content straight from the tar
			// stream (buffered); hardlink retargeting below may swap these.
			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
				return err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
		}
	}

	// Restore directory atime/mtime after all children have been written.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
// If `dest` does not exist, it is created unless there are multiple entries in `archive`.
// In the latter case, an error is returned.
// If `dest` is an existing file, it gets overwritten.
// If `dest` is an existing directory, its files get merged (with overwrite for conflicting files).
func Untar(archive io.Reader, dest string, options *TarOptions) error {
	if archive == nil {
		return fmt.Errorf("Empty archive")
	}
	// Transparently handle gzip/bzip2/xz on top of the raw tar stream.
	decompressedArchive, err := DecompressStream(archive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()
	tr := tar.NewReader(decompressedArchive)

	var (
		dirs            []*tar.Header // directory headers, for deferred mtime restore
		create          bool          // dest is missing or an existing non-directory
		multipleEntries bool          // set once the first entry has been extracted
	)

	if fi, err := os.Lstat(dest); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
		// destination does not exist, so it is assumed it has to be created.
		create = true
	} else if !fi.IsDir() {
		// destination exists and is not a directory, so it will be overwritten.
		create = true
	}

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return err
		}

		// Return an error if destination needs to be created and there is more than 1 entry in the tar stream.
		if create && multipleEntries {
			return fmt.Errorf("Trying to untar an archive with multiple entries to an inexistant target `%s`: did you mean `%s` instead?", dest, filepath.Dir(dest))
		}

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		if !strings.HasSuffix(hdr.Name, "/") {
			// Not the root directory, ensure that the parent directory exists
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)
			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = os.MkdirAll(parentPath, 0777)
				if err != nil {
					return err
				}
			}
		}

		var path string
		if create {
			path = dest // we are renaming hdr.Name to dest
		} else {
			path = filepath.Join(dest, hdr.Name)
		}

		// If path exits we almost always just want to remove and replace it
		// The only exception is when it is a directory *and* the file from
		// the layer is also a directory. Then we want to merge them (i.e.
		// just apply the metadata from the layer).
		if fi, err := os.Lstat(path); err == nil {
			// The root "." entry of the archive never replaces an existing dir.
			if fi.IsDir() && hdr.Name == "." {
				continue
			}
			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
				if err := os.RemoveAll(path); err != nil {
					return err
				}
			}
		}

		if err := createTarFile(path, dest, hdr, tr, options == nil || !options.NoLchown); err != nil {
			return err
		}

		// Successfully added an entry. Predicting multiple entries for next iteration (not current one).
		multipleEntries = true

		// Directory mtimes must be handled at the end to avoid further
		// file creation in them to modify the directory mtime
		if hdr.Typeflag == tar.TypeDir {
			dirs = append(dirs, hdr)
		}
	}

	// Restore directory atime/mtime now that all children have been written.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
		if err := syscall.UtimesNano(path, ts); err != nil {
			return err
		}
	}

	return nil
}
// Read implements io.Reader: it streams the tar (optionally re-compressed
// with gzip) back to the caller while, as a side effect, computing a sha256
// per file over its encoded header and content, accumulating the hex digests
// in ts.sums keyed by the normalized file name.
func (ts *TarSum) Read(buf []byte) (int, error) {
	// Lazy one-time initialization on the first Read call.
	if ts.gz == nil {
		ts.bufTar = bytes.NewBuffer([]byte{})
		ts.bufGz = bytes.NewBuffer([]byte{})
		ts.tarR = tar.NewReader(ts.Reader)
		ts.tarW = tar.NewWriter(ts.bufTar)
		if !ts.DisableCompression {
			ts.gz = gzip.NewWriter(ts.bufGz)
		} else {
			// Pass-through writer so the rest of the code can treat the
			// compressed and uncompressed cases uniformly.
			ts.gz = &nopCloseFlusher{Writer: ts.bufGz}
		}
		ts.h = sha256.New()
		ts.h.Reset()
		ts.first = true
		ts.sums = make(map[string]string)
	}
	if ts.finished {
		// Input fully consumed; drain whatever remains in the output buffer.
		return ts.bufGz.Read(buf)
	}
	buf2 := make([]byte, len(buf), cap(buf))

	n, err := ts.tarR.Read(buf2)
	if err != nil {
		if err == io.EOF {
			// End of the current file's content: fold the tail into the hash
			// and finalize this file's checksum (skipped for the very first
			// EOF, which precedes any file).
			if _, err := ts.h.Write(buf2[:n]); err != nil {
				return 0, err
			}
			if !ts.first {
				ts.sums[ts.currentFile] = hex.EncodeToString(ts.h.Sum(nil))
				ts.h.Reset()
			} else {
				ts.first = false
			}

			currentHeader, err := ts.tarR.Next()
			if err != nil {
				if err == io.EOF {
					// No more entries: close the compressor (flushing its
					// footer) and switch to drain-only mode.
					if err := ts.gz.Close(); err != nil {
						return 0, err
					}
					ts.finished = true
					return n, nil
				}
				return n, err
			}
			// Normalize "./name/" to "name" for the sums map key.
			ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/")
			// Hash the header fields before any of the file's content.
			if err := ts.encodeHeader(currentHeader); err != nil {
				return 0, err
			}
			if err := ts.tarW.WriteHeader(currentHeader); err != nil {
				return 0, err
			}
			if _, err := ts.tarW.Write(buf2[:n]); err != nil {
				return 0, err
			}
			ts.tarW.Flush()
			if _, err := io.Copy(ts.gz, ts.bufTar); err != nil {
				return 0, err
			}
			ts.gz.Flush()

			return ts.bufGz.Read(buf)
		}
		return n, err
	}

	// Filling the hash buffer
	if _, err = ts.h.Write(buf2[:n]); err != nil {
		return 0, err
	}

	// Filling the tar writer
	if _, err = ts.tarW.Write(buf2[:n]); err != nil {
		return 0, err
	}
	ts.tarW.Flush()

	// Filling the gz writer
	if _, err = io.Copy(ts.gz, ts.bufTar); err != nil {
		return 0, err
	}
	ts.gz.Flush()

	return ts.bufGz.Read(buf)
}