// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
func applyLayerHandler(dest string, layer archive.Reader, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)

	// Ensure it is a Windows-style volume path
	dest = longpath.AddPrefix(dest)

	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}

	// NOTE(review): tmpDir is created under %temp% but never referenced by the
	// unpack call below — presumably scratch space used indirectly by the
	// platform unpack machinery; confirm before removing. Also note the error
	// message reports `dest`, not the %temp% parent actually used.
	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
	}

	// Unpack the (now decompressed) stream; the temp dir is removed regardless
	// of the unpack outcome.
	s, err := archive.UnpackLayer(dest, layer)
	os.RemoveAll(tmpDir)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest)
	}

	return s, nil
}
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { // We use system.OpenSequential to use sequential file access on Windows, avoiding // depleting the standby list. On Linux, this equates to a regular os.Open. rawTar, err := system.OpenSequential(filename) if err != nil { logrus.Debugf("Error reading embedded tar: %v", err) return nil, err } defer rawTar.Close() var r io.Reader if progressOutput != nil { fileInfo, err := rawTar.Stat() if err != nil { logrus.Debugf("Error statting file: %v", err) return nil, err } r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") } else { r = rawTar } inflatedLayerData, err := archive.DecompressStream(r) if err != nil { return nil, err } defer inflatedLayerData.Close() if ds, ok := l.ls.(layer.DescribableStore); ok { return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) } return l.ls.Register(inflatedLayerData, rootFS.ChainID()) }
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer archive.ArchiveReader, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } cmd := reexec.Command("docker-applyLayer", dest) cmd.Stdin = layer outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) } // Stdout should be a valid JSON struct representing an applyLayerResponse. response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) } return response.LayerSize, nil }
func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData archive.ArchiveReader, root string) (size int64, err error) { // this is saving the tar-split metadata mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) if err != nil { return 0, err } mfz := gzip.NewWriter(mf) metaPacker := storage.NewJSONPacker(mfz) defer mf.Close() defer mfz.Close() inflatedLayerData, err := archive.DecompressStream(layerData) if err != nil { return 0, err } // we're passing nil here for the file putter, because the ApplyDiff will // handle the extraction of the archive rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) if err != nil { return 0, err } if size, err = graph.driver.ApplyDiff(id, parent, archive.ArchiveReader(rdr)); err != nil { return 0, err } return }
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { rawTar, err := os.Open(filename) if err != nil { logrus.Debugf("Error reading embedded tar: %v", err) return nil, err } defer rawTar.Close() var r io.Reader if progressOutput != nil { fileInfo, err := rawTar.Stat() if err != nil { logrus.Debugf("Error statting file: %v", err) return nil, err } r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") } else { r = rawTar } inflatedLayerData, err := archive.DecompressStream(r) if err != nil { return nil, err } defer inflatedLayerData.Close() if ds, ok := l.ls.(layer.DescribableStore); ok { return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) } return l.ls.Register(inflatedLayerData, rootFS.ChainID()) }
// Download fetches each descriptor in layers, tees the compressed bytes into
// a new blob-store entry while applying the decompressed contents to
// dm.tmpDir, and appends the resulting DiffID to initialRootFS. Returns the
// updated root FS. The func() result is always nil (no release callback).
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	for _, l := range layers {
		b, err := dm.blobStore.New()
		if err != nil {
			return initialRootFS, nil, err
		}
		// NOTE(review): these defers accumulate per iteration and only run
		// when Download returns, so every blob/reader stays open until all
		// layers finish — fine for small layer counts, but worth confirming.
		defer b.Close()
		rc, _, err := l.Download(ctx, progressOutput)
		if err != nil {
			return initialRootFS, nil, errors.Wrap(err, "failed to download")
		}
		defer rc.Close()
		// Tee the compressed bytes into the blob store while we consume them.
		r := io.TeeReader(rc, b)
		inflatedLayerData, err := archive.DecompressStream(r)
		if err != nil {
			return initialRootFS, nil, err
		}
		// The digester hashes the *decompressed* stream: its digest is the
		// layer DiffID.
		digester := digest.Canonical.New()
		if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
			return initialRootFS, nil, err
		}
		initialRootFS.Append(layer.DiffID(digester.Digest()))
		d, err := b.Commit()
		if err != nil {
			return initialRootFS, nil, err
		}
		dm.blobs = append(dm.blobs, d)
	}
	return initialRootFS, nil, nil
}
func (b *builder) readContext(context io.Reader) (err error) { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return } // Make sure we clean-up upon error. In the happy case the caller // is expected to manage the clean-up defer func() { if err != nil { if e := os.RemoveAll(tmpdirPath); e != nil { logrus.Debugf("[BUILDER] failed to remove temporary context: %s", e) } } }() decompressedStream, err := archive.DecompressStream(context) if err != nil { return } if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version1); err != nil { return } if err = chrootarchive.Untar(b.context, tmpdirPath, nil); err != nil { return } b.contextPath = tmpdirPath return }
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, progressOutput progress.Output) (layer.Layer, error) { rawTar, err := os.Open(filename) if err != nil { logrus.Debugf("Error reading embedded tar: %v", err) return nil, err } defer rawTar.Close() inflatedLayerData, err := archive.DecompressStream(rawTar) if err != nil { return nil, err } defer inflatedLayerData.Close() if progressOutput != nil { fileInfo, err := os.Stat(filename) if err != nil { logrus.Debugf("Error statting file: %v", err) return nil, err } progressReader := progress.NewProgressReader(inflatedLayerData, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") return l.ls.Register(progressReader, rootFS.ChainID()) } return l.ls.Register(inflatedLayerData, rootFS.ChainID()) }
// Untar reads a stream of bytes from `archive`, parses it as a tar archive, // and unpacks it into the directory at `dest`. // The archive may be compressed with one of the following algorithms: // identity (uncompressed), gzip, bzip2, xz. func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := system.MkdirAll(dest, 0777); err != nil { return err } } decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() return invokeUnpack(decompressedArchive, dest, options) }
// MakeTarSumContext returns a build Context from a tar stream. // // It extracts the tar stream to a temporary folder that is deleted as soon as // the Context is closed. // As the extraction happens, a tarsum is calculated for every file, and the set of // all those sums then becomes the source of truth for all operations on this Context. // // Closing tarStream has to be done by the caller. func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { root, err := ioutils.TempDir("", "docker-builder") if err != nil { return nil, err } tsc := &tarSumContext{root: root} // Make sure we clean-up upon error. In the happy case the caller // is expected to manage the clean-up defer func() { if err != nil { tsc.Close() } }() decompressedStream, err := archive.DecompressStream(tarStream) if err != nil { return nil, err } sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) if err != nil { return nil, err } if err := chrootarchive.Untar(sum, root, nil); err != nil { return nil, err } tsc.sums = sum.GetSums() return tsc, nil }
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := os.MkdirAll(dest, 0777); err != nil { return err } } decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() // We can't pass a potentially large exclude list directly via cmd line // because we easily overrun the kernel's max argument/environment size // when the full image list is passed (e.g. when this is used by // `docker load`). We will marshall the options via a pipe to the // child r, w, err := os.Pipe() if err != nil { return fmt.Errorf("Untar pipe failure: %v", err) } cmd := reexec.Command("docker-untar", dest) cmd.Stdin = decompressedArchive cmd.ExtraFiles = append(cmd.ExtraFiles, r) var output bytes.Buffer cmd.Stdout = &output cmd.Stderr = &output if err := cmd.Start(); err != nil { return fmt.Errorf("Untar error on re-exec cmd: %v", err) } //write the options to the pipe for the untar exec to read if err := json.NewEncoder(w).Encode(options); err != nil { return fmt.Errorf("Untar json encode to pipe failed: %v", err) } w.Close() if err := cmd.Wait(); err != nil { return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) } return nil }
// StoreImage stores file system layer data for the given image to the
// image's registered storage driver. Image metadata is stored in a file
// at the specified root directory. This function also computes the TarSum
// of `layerData` (currently using tarsum.dev).
func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error {
	// Store the layer
	var (
		size        int64
		err         error
		driver      = img.graph.Driver()
		layerTarSum tarsum.TarSum
	)

	// If layerData is not nil, unpack it into the new layer
	if layerData != nil {
		// If the image doesn't have a checksum, we should add it. The layer
		// checksums are verified when they are pulled from a remote, but when
		// a container is committed it should be added here.
		if img.Checksum == "" {
			// NOTE(review): the `:=` below shadows the outer `err` inside
			// this block; harmless here because every path returns the inner
			// err directly, but keep in mind when editing.
			layerDataDecompressed, err := archive.DecompressStream(layerData)
			if err != nil {
				return err
			}
			defer layerDataDecompressed.Close()

			// The driver consumes the stream through the tarsum wrapper, so
			// the checksum is computed as a side effect of ApplyDiff.
			if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil {
				return err
			}

			if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil {
				return err
			}

			img.Checksum = layerTarSum.Sum(nil)
		} else if size, err = driver.ApplyDiff(img.ID, img.Parent, layerData); err != nil {
			return err
		}
	}

	img.Size = size
	if err := img.SaveSize(root); err != nil {
		return err
	}

	// Persist the image metadata as JSON next to the layer data.
	f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewEncoder(f).Encode(img)
}
// StoreImage stores file system layer data for the given image to the
// image's registered storage driver. Image metadata is stored in a file
// at the specified root directory. This function also computes the TarSum
// of `layerData` (currently using tarsum.dev).
func StoreImage(img *Image, layerData archive.ArchiveReader, root string) error {
	// Store the layer
	var (
		size        int64
		err         error
		driver      = img.graph.Driver()
		layerTarSum tarsum.TarSum
	)

	// If layerData is not nil, unpack it into the new layer
	if layerData != nil {
		// NOTE(review): the `:=` below shadows the outer `err` within this
		// block; harmless since each path returns the inner err directly.
		layerDataDecompressed, err := archive.DecompressStream(layerData)
		if err != nil {
			return err
		}
		defer layerDataDecompressed.Close()

		// The driver consumes the stream through the tarsum wrapper, so the
		// checksum is computed as a side effect of ApplyDiff.
		if layerTarSum, err = tarsum.NewTarSum(layerDataDecompressed, true, tarsum.VersionDev); err != nil {
			return err
		}

		if size, err = driver.ApplyDiff(img.ID, img.Parent, layerTarSum); err != nil {
			return err
		}

		checksum := layerTarSum.Sum(nil)
		// Warn (but don't fail) when a pre-set checksum disagrees with the
		// freshly computed one; the computed value always wins.
		if img.Checksum != "" && img.Checksum != checksum {
			log.Warnf("image layer checksum mismatch: computed %q, expected %q", checksum, img.Checksum)
		}

		img.Checksum = checksum
	}

	img.Size = size
	if err := img.SaveSize(root); err != nil {
		return err
	}

	// Persist the image metadata as JSON next to the layer data.
	f, err := os.OpenFile(jsonPath(root), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewEncoder(f).Encode(img)
}
func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS) (layer.Layer, error) { rawTar, err := os.Open(filename) if err != nil { logrus.Debugf("Error reading embedded tar: %v", err) return nil, err } inflatedLayerData, err := archive.DecompressStream(rawTar) if err != nil { return nil, err } defer rawTar.Close() defer inflatedLayerData.Close() return l.ls.Register(inflatedLayerData, rootFS.ChainID()) }
func (b blobChanger) addFile(rw http.ResponseWriter, r *http.Request) { recorder := httptest.NewRecorder() b.Handler.ServeHTTP(recorder, r) inflated, err := archive.DecompressStream(bytes.NewReader(recorder.Body.Bytes())) if err != nil { logrus.Errorf("Error decompressing: %s", err) http.Error(rw, "Error handling tar stream in proxy", 500) return } copied := bytes.NewBuffer(nil) deflater, err := archive.CompressStream(writeCloser{copied}, archive.Gzip) if err != nil { logrus.Errorf("Error compressing: %s", err) http.Error(rw, "Error handling tar stream in proxy", 500) return } tw := tar.NewWriter(deflater) if err := addFile(tw, "/etc/malicious.txt", []byte("#Bad bad stuff")); err != nil { logrus.Errorf("Error adding file: %s", err) http.Error(rw, "Error handling tar stream in proxy", 500) return } if err := tarCopy(tw, tar.NewReader(inflated)); err != nil { logrus.Errorf("Error copying: %s", err) http.Error(rw, "Error handling tar stream in proxy", 500) return } recorder.Header().Set("Content-Length", strconv.Itoa(len(copied.Bytes()))) copyHeader(rw.Header(), recorder.Header()) rw.WriteHeader(recorder.Code) n, err := rw.Write(copied.Bytes()) if err != nil { logrus.Errorf("Error writing: %s", err) return } if n != copied.Len() { logrus.Errorf("Short write: wrote %d, expected %d", n, copied.Len()) } }
// applyLayerHandler parses a diff in the standard layer format from `layer`, and // applies it to the directory `dest`. Returns the size in bytes of the // contents of the layer. func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { dest = filepath.Clean(dest) if decompress { decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() layer = decompressed } if options == nil { options = &archive.TarOptions{} if rsystem.RunningInUserNS() { options.InUserNS = true } } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } data, err := json.Marshal(options) if err != nil { return 0, fmt.Errorf("ApplyLayer json encode: %v", err) } cmd := reexec.Command("docker-applyLayer", dest) cmd.Stdin = layer cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) cmd.Stdout, cmd.Stderr = outBuf, errBuf if err = cmd.Run(); err != nil { return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) } // Stdout should be a valid JSON struct representing an applyLayerResponse. response := applyLayerResponse{} decoder := json.NewDecoder(outBuf) if err = decoder.Decode(&response); err != nil { return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) } return response.LayerSize, nil }
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := os.MkdirAll(dest, 0777); err != nil { return err } } // We can't pass the exclude list directly via cmd line // because we easily overrun the shell max argument list length // when the full image list is passed (e.g. when this is used // by `docker load`). Instead we will add the JSON marshalled // and placed in the env, which has significantly larger // max size data, err := json.Marshal(options) if err != nil { return fmt.Errorf("Untar json encode: %v", err) } decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() cmd := reexec.Command("docker-untar", dest) cmd.Stdin = decompressedArchive cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) out, err := cmd.CombinedOutput() if err != nil { return fmt.Errorf("Untar %s %s", err, out) } return nil }
func ApplyLayer(dest string, layer archive.ArchiveReader) (size int64, err error) { dest = filepath.Clean(dest) decompressed, err := archive.DecompressStream(layer) if err != nil { return 0, err } defer decompressed.Close() tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") if err != nil { return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) } s, err := archive.UnpackLayer(dest, decompressed) os.RemoveAll(tmpDir) if err != nil { return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) } return s, nil }
func (b *Builder) readContext(context io.Reader) error { tmpdirPath, err := ioutil.TempDir("", "docker-build") if err != nil { return err } decompressedStream, err := archive.DecompressStream(context) if err != nil { return err } if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil { return err } if err := archive.Untar(b.context, tmpdirPath, nil); err != nil { return err } b.contextPath = tmpdirPath return nil }
// Untar unpacks tarArchive into dest via a re-exec'ed "docker-untar" child
// process: the decompressed stream is fed on stdin and the JSON-encoded
// TarOptions are passed as the second command-line argument.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}
	if options == nil {
		options = &archive.TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	var (
		buf bytes.Buffer
		enc = json.NewEncoder(&buf)
	)
	if err := enc.Encode(options); err != nil {
		return fmt.Errorf("Untar json encode: %v", err)
	}
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err := os.MkdirAll(dest, 0777); err != nil {
			return err
		}
	}
	// NOTE(review): dest is cleaned only after the Stat/MkdirAll above, so an
	// uncleaned path is what gets created — confirm this is intentional.
	dest = filepath.Clean(dest)

	decompressedArchive, err := archive.DecompressStream(tarArchive)
	if err != nil {
		return err
	}
	defer decompressedArchive.Close()

	// NOTE(review): passing the encoded options in argv can overrun the
	// kernel's argument-size limit for very large exclude lists; sibling
	// variants of this helper use a pipe or the environment for that reason.
	cmd := reexec.Command("docker-untar", dest, buf.String())
	cmd.Stdin = decompressedArchive
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("Untar %s %s", err, out)
	}
	return nil
}
func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData io.Reader, root string) (err error) { var ar io.Reader if graph.tarSplitDisabled { ar = layerData } else { // this is saving the tar-split metadata mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) if err != nil { return err } mfz := gzip.NewWriter(mf) metaPacker := storage.NewJSONPacker(mfz) defer mf.Close() defer mfz.Close() inflatedLayerData, err := archive.DecompressStream(layerData) if err != nil { return err } // we're passing nil here for the file putter, because the ApplyDiff will // handle the extraction of the archive rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil) if err != nil { return err } ar = archive.Reader(rdr) } if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, ar); err != nil { return err } return nil }
func CreateNewProject(name string, reader io.Reader) (*Project, error) { project := &Project{ Name: name, } exists, err := project.Exists() if err != nil { return nil, err } else if exists == true { return nil, errors.New("Project already exists") } decompressedArchive, err := archive.DecompressStream(reader) if err != nil { return nil, err } defer decompressedArchive.Close() project.createDirectory() archive.Unpack(decompressedArchive, basePath+name, &archive.TarOptions{}) return project, nil }
// Handler for teasing out the automatic decompression func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { if tarArchive == nil { return fmt.Errorf("Empty archive") } if options == nil { options = &archive.TarOptions{} } if options.ExcludePatterns == nil { options.ExcludePatterns = []string{} } rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) if err != nil { return err } dest = filepath.Clean(dest) if _, err := os.Stat(dest); os.IsNotExist(err) { if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { return err } } r := ioutil.NopCloser(tarArchive) if decompress { decompressedArchive, err := archive.DecompressStream(tarArchive) if err != nil { return err } defer decompressedArchive.Close() r = decompressedArchive } return invokeUnpack(r, dest, options) }
// FetchImageBlob fetches the image blob for image from the registry in
// options, verifies its blobsum, computes its diffID and uncompressed size,
// and moves the downloaded tarball (plus a history JSON sidecar) into the
// destination directory. Returns the diffID ("sha256:..." of the
// uncompressed layer).
func FetchImageBlob(ctx context.Context, options Options, image *ImageWithMeta, progressOutput progress.Output) (string, error) {
	defer trace.End(trace.Begin(options.Image + "/" + image.Layer.BlobSum))

	id := image.ID
	layer := image.Layer.BlobSum
	meta := image.Meta
	diffID := ""

	url, err := url.Parse(options.Registry)
	if err != nil {
		return diffID, err
	}
	url.Path = path.Join(url.Path, options.Image, "blobs", layer)

	log.Debugf("URL: %s\n ", url)

	fetcher := urlfetcher.NewURLFetcher(urlfetcher.Options{
		Timeout:            options.Timeout,
		Username:           options.Username,
		Password:           options.Password,
		Token:              options.Token,
		InsecureSkipVerify: options.InsecureSkipVerify,
	})

	// ctx
	ctx, cancel := context.WithTimeout(ctx, options.Timeout)
	defer cancel()

	imageFileName, err := fetcher.Fetch(ctx, url, true, progressOutput, image.String())
	if err != nil {
		return diffID, err
	}

	// Cleanup function for the error case
	// NOTE(review): this closure reads the OUTER `err`; several returns below
	// use a SHADOWED inner err (declared with `:=` inside blocks), so the
	// downloaded file is NOT removed on those error paths — confirm/fix.
	defer func() {
		if err != nil {
			os.Remove(imageFileName)
		}
	}()

	// Open the file so that we can use it as a io.Reader for sha256 calculation
	imageFile, err := os.Open(string(imageFileName))
	if err != nil {
		return diffID, err
	}
	defer imageFile.Close()

	// blobSum is the sha of the compressed layer
	blobSum := sha256.New()
	// diffIDSum is the sha of the uncompressed layer
	diffIDSum := sha256.New()

	// blobTr is an io.TeeReader that writes bytes to blobSum that it reads from imageFile
	// see https://golang.org/pkg/io/#TeeReader
	blobTr := io.TeeReader(imageFile, blobSum)

	progress.Update(progressOutput, image.String(), "Verifying Checksum")
	decompressedTar, err := archive.DecompressStream(blobTr)
	if err != nil {
		return diffID, err
	}

	// Copy bytes from decompressed layer into diffIDSum to calculate diffID
	_, cerr := io.Copy(diffIDSum, decompressedTar)
	if cerr != nil {
		return diffID, cerr
	}

	bs := fmt.Sprintf("sha256:%x", blobSum.Sum(nil))
	if bs != layer {
		return diffID, fmt.Errorf("Failed to validate layer checksum. Expected %s got %s", layer, bs)
	}

	diffID = fmt.Sprintf("sha256:%x", diffIDSum.Sum(nil))

	// this isn't an empty layer, so we need to calculate the size
	if diffID != string(DigestSHA256EmptyTar) {
		var layerSize int64

		// seek to the beginning of the file
		imageFile.Seek(0, 0)

		// recreate the decompressed tar Reader
		// NOTE(review): `:=` shadows `err` here, so the deferred os.Remove
		// above will not see failures from this block.
		decompressedTar, err := archive.DecompressStream(imageFile)
		if err != nil {
			return "", err
		}

		// get a tar reader for access to the files in the archive
		tr := tar.NewReader(decompressedTar)

		// iterate through tar headers to get file sizes
		for {
			tarHeader, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				return "", err
			}
			layerSize += tarHeader.Size
		}

		image.Size = layerSize
	}

	log.Infof("diffID for layer %s: %s", id, diffID)

	// Ensure the parent directory exists
	destination := path.Join(DestinationDirectory(options), id)
	err = os.MkdirAll(destination, 0755) /* #nosec */
	if err != nil {
		return diffID, err
	}

	// Move(rename) the temporary file to its final destination
	err = os.Rename(string(imageFileName), path.Join(destination, id+".tar"))
	if err != nil {
		return diffID, err
	}

	// Dump the history next to it
	err = ioutil.WriteFile(path.Join(destination, id+".json"), []byte(meta), 0644)
	if err != nil {
		return diffID, err
	}

	progress.Update(progressOutput, image.String(), "Download complete")

	return diffID, nil
}
// Untar unpacks tarArchive into dest via a re-exec'ed "docker-untar" child.
// The decompressed stream is fed on stdin. On non-Windows platforms the
// TarOptions are JSON-encoded over an extra pipe; on Windows (where extra
// inherited files are not supported by exec) they travel in the environment.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	if tarArchive == nil {
		return fmt.Errorf("Empty archive")
	}

	if options == nil {
		options = &archive.TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	dest = filepath.Clean(dest)
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err := system.MkdirAll(dest, 0777); err != nil {
			return err
		}
	}

	decompressedArchive, err := archive.DecompressStream(tarArchive)
	if err != nil {
		return err
	}

	var data []byte
	var r, w *os.File
	defer decompressedArchive.Close()

	if runtime.GOOS != "windows" {
		// We can't pass a potentially large exclude list directly via cmd line
		// because we easily overrun the kernel's max argument/environment size
		// when the full image list is passed (e.g. when this is used by
		// `docker load`). We will marshall the options via a pipe to the
		// child
		// This solution won't work on Windows as it will fail in golang
		// exec_windows.go as at the lowest layer because attr.Files > 3
		r, w, err = os.Pipe()
		if err != nil {
			return fmt.Errorf("Untar pipe failure: %v", err)
		}
	} else {
		// We can't pass the exclude list directly via cmd line
		// because we easily overrun the shell max argument list length
		// when the full image list is passed (e.g. when this is used
		// by `docker load`). Instead we will add the JSON marshalled
		// and placed in the env, which has significantly larger
		// max size
		data, err = json.Marshal(options)
		if err != nil {
			return fmt.Errorf("Untar json encode: %v", err)
		}
	}

	cmd := reexec.Command("docker-untar", dest)
	cmd.Stdin = decompressedArchive

	if runtime.GOOS != "windows" {
		cmd.ExtraFiles = append(cmd.ExtraFiles, r)
		output := bytes.NewBuffer(nil)
		cmd.Stdout = output
		cmd.Stderr = output

		// NOTE(review): the parent never closes its copy of `r`, and both
		// pipe ends leak if Start or the JSON encode below fails — consider
		// tightening fd handling here.
		if err := cmd.Start(); err != nil {
			return fmt.Errorf("Untar error on re-exec cmd: %v", err)
		}
		//write the options to the pipe for the untar exec to read
		if err := json.NewEncoder(w).Encode(options); err != nil {
			return fmt.Errorf("Untar json encode to pipe failed: %v", err)
		}
		w.Close()

		if err := cmd.Wait(); err != nil {
			return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
		}
		return nil
	} else {
		cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))
		out, err := cmd.CombinedOutput()
		if err != nil {
			return fmt.Errorf("Untar %s %s", err, out)
		}
		return nil
	}
}
// pullV2Tag pulls the image referenced by ref (tag or digest) from a V2
// registry: it fetches and verifies the schema1 manifest, downloads any
// layers not already present locally (coordinated through the download
// pool), registers them in the layer store, creates the image config, and
// updates the tag store. Returns whether the tag mapping changed.
func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Digested); isDigested {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}
	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	// On exit (success or failure), detach every download from the pool —
	// reporting the named `err` to other waiters — and discard temp files.
	defer func() {
		for _, d := range downloads {
			p.config.Pool.removeWithError(d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	// Image history converted to the new format
	var history []image.History

	// poolKey accumulates blobsums so that each entry keys the *chain* of
	// layers up to that point, not a single blob.
	poolKey := "v2layer:"
	notFoundLocally := false

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		poolKey += blobSum.String()

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		// Throwaway entries contribute history but no layer data.
		if throwAway.ThrowAway {
			continue
		}

		// Do we have a layer on disk corresponding to the set of
		// blobsums up to this point?
		if !notFoundLocally {
			notFoundLocally = true
			diffID, err := p.blobSumService.GetDiffID(blobSum)
			if err == nil {
				rootFS.Append(diffID)
				if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil {
					notFoundLocally = false
					logrus.Debugf("Layer already exists: %s", blobSum.String())
					out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil))
					// Hold a reference until the pull completes.
					defer layer.ReleaseAndLog(p.config.LayerStore, l)
					continue
				} else {
					// Roll back the speculative Append above.
					rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1]
				}
			}
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil))

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}

		d := &downloadInfo{
			poolKey: poolKey,
			digest:  blobSum,
			tmpFile: tmpFile,
			// TODO: seems like this chan buffer solved hanging problem in go1.5,
			// this can indicate some deeper problem that somehow we never take
			// error from channel in loop below
			err: make(chan error, 1),
		}

		downloads = append(downloads, d)

		// Join the shared pool: if another pull is already fetching this
		// chain, we just attach our output to its broadcaster.
		broadcaster, found := p.config.Pool.add(d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}

			diffID, err := p.blobSumService.GetDiffID(d.digest)
			if err != nil {
				return false, err
			}
			rootFS.Append(diffID)

			l, err := p.config.LayerStore.Get(rootFS.ChainID())
			if err != nil {
				return false, err
			}

			defer layer.ReleaseAndLog(p.config.LayerStore, l)
			continue
		}

		// We downloaded this layer ourselves: rewind the temp file and
		// register the decompressed contents with progress reporting.
		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.digest.String()),
			Action:    "Extracting",
		})

		inflatedLayerData, err := archive.DecompressStream(reader)
		if err != nil {
			return false, fmt.Errorf("could not get decompression stream: %v", err)
		}

		l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID())
		if err != nil {
			return false, fmt.Errorf("failed to register layer: %v", err)
		}
		logrus.Debugf("layer %s registered successfully", l.DiffID())
		rootFS.Append(l.DiffID())

		// Cache mapping from this layer's DiffID to the blobsum
		if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil {
			return false, err
		}

		defer layer.ReleaseAndLog(p.config.LayerStore, l)

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	var oldTagImageID image.ID
	if !tagUpdated {
		oldTagImageID, err = p.config.TagStore.Get(ref)
		if err != nil || oldTagImageID != imageID {
			tagUpdated = true
		}
	}

	if tagUpdated {
		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.TagStore.AddDigest(canonical, imageID, true); err != nil {
				return false, err
			}
		} else if err = p.config.TagStore.AddTag(ref, imageID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStore,
		}

		go func() {
			// Closing progressChan signals the end of this transfer's
			// progress stream on every exit path.
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			// Block until the manager signals this download may begin;
			// report "Waiting" only if start has not already fired.
			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled? (Non-blocking check; the blocking wait
				// happens later, just before registration.)
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			defer descriptor.Close()

			// Download with retry. Gives up on DoNotRetry errors,
			// context cancellation, or after maxDownloadAttempts tries.
			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts {
					logrus.Errorf("Download failed: %v", err)
					d.err = err
					return
				}

				logrus.Errorf("Download failed, retrying: %v", err)
				// Linear backoff: wait retries*5 seconds, counted down
				// one second at a time so the countdown can be shown
				// and cancellation observed promptly.
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					// The map index picks "s" (plural) unless delay == 1.
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}
				}
			}

			// NOTE(review): presumably tells the transfer manager that the
			// concurrency-limited (download) phase is over — confirm
			// against the Transfer implementation.
			close(inactive)

			if parentDownload != nil {
				// Registration must happen on top of the parent's
				// resulting layer, so block until the parent finishes
				// (or this transfer is cancelled).
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			// Wrap the downloaded data so reads report "Extracting"
			// progress and abort on context cancellation. Closing
			// reader also closes downloadReader.
			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			// If the descriptor carries a distribution descriptor (e.g. a
			// foreign-layer source) and the store supports it, register
			// with that descriptor attached.
			var src distribution.Descriptor
			if fs, ok := descriptor.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			}
			if err != nil {
				// Distinguish cancellation from a genuine registration
				// failure so the reported error is meaningful.
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			// Let descriptors that care learn the final DiffID.
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so we can defer close(c).
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}
// ImportImage imports an image, getting the archived layer data either from // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { var ( sf = streamformatter.NewJSONStreamFormatter() rc io.ReadCloser resp *http.Response newRef reference.Named ) if repository != "" { var err error newRef, err = reference.ParseNamed(repository) if err != nil { return err } if _, isCanonical := newRef.(reference.Canonical); isCanonical { return errors.New("cannot import digest reference") } if tag != "" { newRef, err = reference.WithTag(newRef, tag) if err != nil { return err } } } config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) if err != nil { return err } if src == "-" { rc = inConfig } else { inConfig.Close() u, err := url.Parse(src) if err != nil { return err } if u.Scheme == "" { u.Scheme = "http" u.Host = src u.Path = "" } outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) resp, err = httputils.Download(u.String()) if err != nil { return err } progressOutput := sf.NewProgressOutput(outStream, true) rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") } defer rc.Close() if len(msg) == 0 { msg = "Imported from " + src } inflatedLayerData, err := archive.DecompressStream(rc) if err != nil { return err } // TODO: support windows baselayer? 
l, err := daemon.layerStore.Register(inflatedLayerData, "") if err != nil { return err } defer layer.ReleaseAndLog(daemon.layerStore, l) created := time.Now().UTC() imgConfig, err := json.Marshal(&image.Image{ V1Image: image.V1Image{ DockerVersion: dockerversion.Version, Config: config, Architecture: runtime.GOARCH, OS: runtime.GOOS, Created: created, Comment: msg, }, RootFS: &image.RootFS{ Type: "layers", DiffIDs: []layer.DiffID{l.DiffID()}, }, History: []image.History{{ Created: created, Comment: msg, }}, }) if err != nil { return err } id, err := daemon.imageStore.Create(imgConfig) if err != nil { return err } // FIXME: connect with commit code and call refstore directly if newRef != nil { if err := daemon.TagImageWithReference(id, newRef); err != nil { return err } } daemon.LogImageEvent(id.String(), id.String(), "import") outStream.Write(sf.FormatStatus("", id.String())) return nil }
func (p *v1Puller) downloadLayer(out io.Writer, v1LayerID, endpoint string, parentID layer.ChainID, layerSize int64, layersDownloaded *bool) (l layer.Layer, err error) { // ensure no two downloads of the same layer happen at the same time poolKey := "layer:" + v1LayerID broadcaster, found := p.config.Pool.add(poolKey) broadcaster.Add(out) if found { logrus.Debugf("Image (id: %s) pull is already running, skipping", v1LayerID) if err = broadcaster.Wait(); err != nil { return nil, err } layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name) if err != nil { return nil, err } // Does the layer actually exist l, err := p.config.LayerStore.Get(layerID) if err != nil { return nil, err } return l, nil } // This must use a closure so it captures the value of err when // the function returns, not when the 'defer' is evaluated. defer func() { p.config.Pool.removeWithError(poolKey, err) }() retries := 5 for j := 1; j <= retries; j++ { // Get the layer status := "Pulling fs layer" if j > 1 { status = fmt.Sprintf("Pulling fs layer [retries: %d]", j) } broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), status, nil)) layerReader, err := p.session.GetRemoteImageLayer(v1LayerID, endpoint, layerSize) if uerr, ok := err.(*url.Error); ok { err = uerr.Err } if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error pulling dependent layers", nil)) return nil, err } *layersDownloaded = true defer layerReader.Close() reader := progressreader.New(progressreader.Config{ In: layerReader, Out: broadcaster, Formatter: p.sf, Size: layerSize, NewLines: false, ID: stringid.TruncateID(v1LayerID), Action: "Downloading", }) inflatedLayerData, err := archive.DecompressStream(reader) if err != nil { return nil, fmt.Errorf("could not get decompression stream: %v", err) } l, err := 
p.config.LayerStore.Register(inflatedLayerData, parentID) if err != nil { return nil, fmt.Errorf("failed to register layer: %v", err) } logrus.Debugf("layer %s registered successfully", l.DiffID()) if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries { time.Sleep(time.Duration(j) * 500 * time.Millisecond) continue } else if err != nil { broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Error downloading dependent layers", nil)) return nil, err } // Cache mapping from this v1 ID to content-addressable layer ID if err := p.v1IDService.Set(v1LayerID, p.repoInfo.Index.Name, l.ChainID()); err != nil { return nil, err } broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(v1LayerID), "Download complete", nil)) broadcaster.Close() return l, nil } // not reached return nil, nil }