func (container *Container) RwChecksum() (string, error) {
	rwData, err := archive.Tar(container.rwPath(), archive.Xz)
	if err != nil {
		return "", err
	}
	return utils.HashData(rwData)
}
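// A minimal, standalone sketch of what a helper like utils.HashData above
// presumably does: drain the archive stream through sha256 and hex-encode the
// digest. The function name hashStream, the output format, and the example
// input are assumptions for illustration, not taken from the snippet above.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// hashStream consumes the reader and returns the hex-encoded sha256 digest.
func hashStream(r io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashStream(strings.NewReader("example tar bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}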
// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer() (archive.Archive, error) {
	if img.graph == nil {
		return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
	}
	driver := img.graph.driver
	if differ, ok := driver.(graphdriver.Differ); ok {
		return differ.Diff(img.ID)
	}
	imgFs, err := driver.Get(img.ID)
	if err != nil {
		return nil, err
	}
	if img.Parent == "" {
		return archive.Tar(imgFs, archive.Uncompressed)
	} else {
		parentFs, err := driver.Get(img.Parent)
		if err != nil {
			return nil, err
		}
		changes, err := archive.ChangesDirs(imgFs, parentFs)
		if err != nil {
			return nil, err
		}
		return archive.ExportChanges(imgFs, changes)
	}
}
// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer(compression archive.Compression) (archive.Archive, error) {
	layerPath, err := img.layer()
	if err != nil {
		return nil, err
	}
	return archive.Tar(layerPath, compression)
}
// Build builds the image by tarring the build context directory and
// streaming it to the daemon's /build endpoint.
func (c *ImageService) Build(tag, dir string) error {
	// Tar the build context directory
	context, err := archive.Tar(dir, archive.Uncompressed)
	if err != nil {
		return err
	}
	var body io.Reader
	body = ioutil.NopCloser(context)

	// Upload the build context
	v := url.Values{}
	v.Set("t", tag)
	v.Set("q", "1")
	v.Set("rm", "1")

	// URL path
	path := fmt.Sprintf("/build?%s", v.Encode())

	// Set the content type to tar
	headers := http.Header{}
	headers.Set("Content-Type", "application/tar")

	// Make the request
	return c.stream("POST", path, body, os.Stdout, headers)
}
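// A standalone sketch of the same request at the HTTP level, assuming a
// daemon reachable over TCP and a context tar that already exists on disk.
// daemonAddr, contextPath, and the repository tag are illustrative values,
// not taken from the snippet above.
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	const daemonAddr = "http://127.0.0.1:4243" // assumed daemon address
	const contextPath = "context.tar"          // assumed pre-built context tar

	f, err := os.Open(contextPath)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Mirror the query parameters set by Build: tag, quiet output, remove
	// intermediate containers.
	v := url.Values{}
	v.Set("t", "myrepo/myimage")
	v.Set("q", "1")
	v.Set("rm", "1")

	req, err := http.NewRequest("POST", fmt.Sprintf("%s/build?%s", daemonAddr, v.Encode()), f)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/tar")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("build status:", resp.Status)
}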
func (container *Container) Export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}
	archive, err := archive.Tar(container.RootfsPath(), archive.Uncompressed)
	if err != nil {
		return nil, err
	}
	return EofReader(archive, func() { container.Unmount() }), nil
}
// hashPath calculates a strong hash (sha256) value for a file tree located
// at `basePth`/`pth`, including all attributes that would normally be
// captured by `tar`. The path to hash is passed in two pieces only to
// permit logging the second piece in isolation, assuming the first is a
// temporary directory in which docker is running. If `clobberTimes` is
// true and hashPath is applied to a single file, the ctime/atime/mtime of
// the file is considered to be unix time 0, for purposes of hashing.
func (b *buildFile) hashPath(basePth, pth string, clobberTimes bool) (string, error) {
	p := path.Join(basePth, pth)
	st, err := os.Stat(p)
	if err != nil {
		return "", err
	}

	h := sha256.New()

	if st.IsDir() {
		tarRd, err := archive.Tar(p, archive.Uncompressed)
		if err != nil {
			return "", err
		}
		_, err = io.Copy(h, tarRd)
		if err != nil {
			return "", err
		}
	} else {
		hdr, err := tar.FileInfoHeader(st, "")
		if err != nil {
			return "", err
		}
		if clobberTimes {
			hdr.AccessTime = time.Unix(0, 0)
			hdr.ChangeTime = time.Unix(0, 0)
			hdr.ModTime = time.Unix(0, 0)
		}
		hdr.Name = filepath.Base(p)

		tarWr := tar.NewWriter(h)
		if err := tarWr.WriteHeader(hdr); err != nil {
			return "", err
		}
		fileRd, err := os.Open(p)
		if err != nil {
			return "", err
		}
		if _, err = io.Copy(tarWr, fileRd); err != nil {
			return "", err
		}
		tarWr.Close()
	}

	hstr := hex.EncodeToString(h.Sum(nil))
	fmt.Fprintf(b.outStream, " ---> data at %s has sha256 %.12s...\n", pth, hstr)
	return hstr, nil
}
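// A standalone sketch of the single-file branch above using only the standard
// library: the file's tar header is written into a sha256 hash with its
// timestamps pinned to the Unix epoch, followed by the file contents.
// hashFileClobbered and the example path are illustrative names, not part of
// the snippet above.
package main

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"time"
)

func hashFileClobbered(p string) (string, error) {
	st, err := os.Stat(p)
	if err != nil {
		return "", err
	}
	hdr, err := tar.FileInfoHeader(st, "")
	if err != nil {
		return "", err
	}
	// Pin all timestamps so the hash depends only on name, mode, size and
	// contents, mirroring the clobberTimes behaviour described above.
	hdr.AccessTime = time.Unix(0, 0)
	hdr.ChangeTime = time.Unix(0, 0)
	hdr.ModTime = time.Unix(0, 0)
	hdr.Name = filepath.Base(p)

	h := sha256.New()
	tw := tar.NewWriter(h)
	if err := tw.WriteHeader(hdr); err != nil {
		return "", err
	}
	f, err := os.Open(p)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := io.Copy(tw, f); err != nil {
		return "", err
	}
	if err := tw.Close(); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashFileClobbered("Dockerfile") // assumed example path
	if err != nil {
		panic(err)
	}
	fmt.Printf("%.12s...\n", sum)
}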
// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer() (arch archive.Archive, err error) {
	if img.graph == nil {
		return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
	}
	driver := img.graph.Driver()
	if differ, ok := driver.(graphdriver.Differ); ok {
		return differ.Diff(img.ID)
	}

	imgFs, err := driver.Get(img.ID, "")
	if err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			driver.Put(img.ID)
		}
	}()

	if img.Parent == "" {
		archive, err := archive.Tar(imgFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return utils.NewReadCloserWrapper(archive, func() error {
			err := archive.Close()
			driver.Put(img.ID)
			return err
		}), nil
	}

	parentFs, err := driver.Get(img.Parent, "")
	if err != nil {
		return nil, err
	}
	defer driver.Put(img.Parent)

	changes, err := archive.ChangesDirs(imgFs, parentFs)
	if err != nil {
		return nil, err
	}

	archive, err := archive.ExportChanges(imgFs, changes)
	if err != nil {
		return nil, err
	}

	return utils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		driver.Put(img.ID)
		return err
	}), nil
}
func (container *Container) Export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	archive, err := archive.Tar(container.basefs, archive.Uncompressed)
	if err != nil {
		container.Unmount()
		return nil, err
	}
	return utils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.Unmount()
		return err
	}), nil
}
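// A minimal, standalone sketch of the wrapper pattern used by
// utils.NewReadCloserWrapper above: tie a cleanup callback (unmounting,
// releasing a driver reference) to the consumer's Close call. The type and
// constructor names are illustrative; only the pattern matches the snippet.
package main

import (
	"fmt"
	"io"
	"strings"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

// Close runs the cleanup callback instead of closing the underlying reader.
func (r *readCloserWrapper) Close() error {
	return r.closer()
}

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	rc := newReadCloserWrapper(strings.NewReader("tar bytes"), func() error {
		fmt.Println("cleanup runs only when the caller closes the stream")
		return nil
	})
	if _, err := io.Copy(io.Discard, rc); err != nil {
		panic(err)
	}
	rc.Close()
}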
func testContextTar(t *testing.T, compression archive.Compression) {
	contextDirectory := filepath.Join(workingDirectory, "build_tests", "TestContextTar")
	context, err := archive.Tar(contextDirectory, compression)
	if err != nil {
		t.Fatalf("failed to build context tar: %v", err)
	}
	buildCmd := exec.Command(dockerBinary, "build", "-t", "contexttar", "-")
	buildCmd.Stdin = context

	out, exitCode, err := runCommandWithOutput(buildCmd)
	if err != nil || exitCode != 0 {
		t.Fatalf("build failed to complete: %v %v", out, err)
	}
	deleteImages("contexttar")
	logDone(fmt.Sprintf("build - build an image with a context tar, compression: %v", compression))
}
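// A sketch of how testContextTar above is likely driven: thin wrappers in the
// same test file exercise one compression type each. The wrapper names are
// assumptions, not taken from the snippet above.
func TestContextTarGzip(t *testing.T) {
	testContextTar(t, archive.Gzip)
}

func TestContextTarNoCompression(t *testing.T) {
	testContextTar(t, archive.Uncompressed)
}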
func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version < 1.3 {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	remoteURL := r.FormValue("remote")
	repoName := r.FormValue("t")
	rawSuppressOutput := r.FormValue("q")
	rawNoCache := r.FormValue("nocache")
	rawRm := r.FormValue("rm")
	repoName, tag := utils.ParseRepositoryTag(repoName)

	var context io.Reader

	if remoteURL == "" {
		context = r.Body
	} else if utils.IsGIT(remoteURL) {
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil {
			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Bzip2)
		if err != nil {
			return err
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL, ioutil.Discard)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}
		c, err := MkBuildContext(string(dockerFile), nil)
		if err != nil {
			return err
		}
		context = c
	}

	suppressOutput, err := getBoolParam(rawSuppressOutput)
	if err != nil {
		return err
	}
	noCache, err := getBoolParam(rawNoCache)
	if err != nil {
		return err
	}
	rm, err := getBoolParam(rawRm)
	if err != nil {
		return err
	}

	b := NewBuildFile(srv, utils.NewWriteFlusher(w), !suppressOutput, !noCache, rm)
	id, err := b.Build(context)
	if err != nil {
		return fmt.Errorf("Error build: %s", err)
	}
	if repoName != "" {
		srv.runtime.repositories.Set(repoName, tag, id, false)
	}
	return nil
}
func (container *Container) Export() (archive.Archive, error) {
	if err := container.EnsureMounted(); err != nil {
		return nil, err
	}
	return archive.Tar(container.RootfsPath(), archive.Uncompressed)
}
func (container *Container) ExportRw() (archive.Archive, error) {
	return archive.Tar(container.rwPath(), archive.Uncompressed)
}
func (b *buildFile) CmdAdd(args string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use ADD")
	}
	tmp := strings.SplitN(args, " ", 2)
	if len(tmp) != 2 {
		return fmt.Errorf("Invalid ADD format")
	}

	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
	if err != nil {
		return err
	}

	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
	if err != nil {
		return err
	}

	cmd := b.config.Cmd
	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", orig, dest)}
	b.config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
	)

	if utils.IsURL(orig) {
		// Initiate the download
		isRemote = true
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)

		// Download and dump result to tmp file
		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
			tmpFile.Close()
			return err
		}
		tmpFile.Close()

		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum := utils.TarSum{Reader: r, DisableCompression: true}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash path and check the cache
	if b.utilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container and start it
	container, _, err := b.runtime.Create(b.config, "")
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if err := b.addContext(container, origPath, destPath, isRemote); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("ADD %s in %s", orig, dest)); err != nil {
		return err
	}
	b.config.Cmd = cmd
	return nil
}
func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if version < 1.3 {
		return fmt.Errorf("Multipart upload for build is no longer supported. Please upgrade your docker client.")
	}
	var (
		remoteURL         = r.FormValue("remote")
		repoName          = r.FormValue("t")
		rawSuppressOutput = r.FormValue("q")
		rawNoCache        = r.FormValue("nocache")
		rawRm             = r.FormValue("rm")
		authEncoded       = r.Header.Get("X-Registry-Auth")
		authConfig        = &auth.AuthConfig{}
		configFileEncoded = r.Header.Get("X-Registry-Config")
		configFile        = &auth.ConfigFile{}
		tag               string
	)
	repoName, tag = utils.ParseRepositoryTag(repoName)

	// This block can be removed when API versions prior to 1.9 are deprecated.
	// Both headers will be parsed and sent along to the daemon, but if a non-empty
	// ConfigFile is present, any value provided as an AuthConfig directly will
	// be overridden. See BuildFile::CmdFrom for details.
	if version < 1.9 && authEncoded != "" {
		authJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
		if err := json.NewDecoder(authJson).Decode(authConfig); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			authConfig = &auth.AuthConfig{}
		}
	}

	if configFileEncoded != "" {
		configFileJson := base64.NewDecoder(base64.URLEncoding, strings.NewReader(configFileEncoded))
		if err := json.NewDecoder(configFileJson).Decode(configFile); err != nil {
			// for a pull it is not an error if no auth was given
			// to increase compatibility with the existing api it is defaulting to be empty
			configFile = &auth.ConfigFile{}
		}
	}

	var context io.Reader

	if remoteURL == "" {
		context = r.Body
	} else if utils.IsGIT(remoteURL) {
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return err
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", remoteURL, root).CombinedOutput(); err != nil {
			return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return err
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		f, err := utils.Download(remoteURL)
		if err != nil {
			return err
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return err
		}
		c, err := MkBuildContext(string(dockerFile), nil)
		if err != nil {
			return err
		}
		context = c
	}

	suppressOutput, err := getBoolParam(rawSuppressOutput)
	if err != nil {
		return err
	}
	noCache, err := getBoolParam(rawNoCache)
	if err != nil {
		return err
	}
	rm, err := getBoolParam(rawRm)
	if err != nil {
		return err
	}

	if version >= 1.8 {
		w.Header().Set("Content-Type", "application/json")
	}

	sf := utils.NewStreamFormatter(version >= 1.8)
	b := NewBuildFile(srv,
		&StdoutFormater{
			Writer:          utils.NewWriteFlusher(w),
			StreamFormatter: sf,
		},
		&StderrFormater{
			Writer:          utils.NewWriteFlusher(w),
			StreamFormatter: sf,
		},
		!suppressOutput, !noCache, rm, utils.NewWriteFlusher(w), sf, authConfig, configFile)
	id, err := b.Build(context)
	if err != nil {
		if sf.Used() {
			w.Write(sf.FormatError(err))
			return nil
		}
		return fmt.Errorf("Error build: %s", err)
	}
	if repoName != "" {
		srv.runtime.repositories.Set(repoName, tag, id, false)
	}
	return nil
}
// ImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tarball.
// name is the set of tags to export.
// out is the writer where the images are written to.
func (srv *Server) ImageExport(name string, out io.Writer) error {
	// get image json
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempdir)

	utils.Debugf("Serializing %s", name)

	rootRepo := srv.runtime.repositories.Repositories[name]
	for _, rootImage := range rootRepo {
		image, _ := srv.ImageInspect(rootImage)
		for i := image; i != nil; {
			// temporary directory
			tmpImageDir := path.Join(tempdir, i.ID)
			if err := os.Mkdir(tmpImageDir, os.ModeDir); err != nil {
				return err
			}
			defer os.RemoveAll(tmpImageDir)

			var version = "1.0"
			var versionBuf = []byte(version)

			if err := ioutil.WriteFile(path.Join(tmpImageDir, "VERSION"), versionBuf, os.ModeAppend); err != nil {
				return err
			}

			// serialize json
			b, err := json.Marshal(i)
			if err != nil {
				return err
			}
			if err := ioutil.WriteFile(path.Join(tmpImageDir, "json"), b, os.ModeAppend); err != nil {
				return err
			}

			// serialize filesystem
			fs, err := archive.Tar(path.Join(srv.runtime.graph.Root, i.ID, "layer"), archive.Uncompressed)
			if err != nil {
				return err
			}

			fsTar, err := os.Create(path.Join(tmpImageDir, "layer.tar"))
			if err != nil {
				return err
			}
			if _, err = io.Copy(fsTar, fs); err != nil {
				return err
			}
			fsTar.Close()

			// find parent
			if i.Parent != "" {
				i, err = srv.ImageInspect(i.Parent)
				if err != nil {
					return err
				}
			} else {
				i = nil
			}
		}
	}

	// write repositories
	rootRepoMap := map[string]Repository{}
	rootRepoMap[name] = rootRepo
	rootRepoJson, _ := json.Marshal(rootRepoMap)
	if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.ModeAppend); err != nil {
		return err
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return err
	}
	if _, err := io.Copy(out, fs); err != nil {
		return err
	}
	return nil
}
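// A standalone sketch of inspecting the stream produced by ImageExport above:
// walk the tar entries and print their names, which should include
// "repositories" plus "<id>/VERSION", "<id>/json" and "<id>/layer.tar" for
// each image in the chain. The input path is an assumed example of where the
// export was saved.
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("export.tar") // assumed saved output of ImageExport
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name)
	}
}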