// TarLayer returns a tar archive of the image's filesystem layer.
func (img *Image) TarLayer() (arch archive.Archive, err error) {
	if img.graph == nil {
		return nil, fmt.Errorf("Can't load storage driver for unregistered image %s", img.ID)
	}
	driver := img.graph.Driver()
	// Drivers that can compute diffs natively short-circuit here.
	if differ, ok := driver.(graphdriver.Differ); ok {
		return differ.Diff(img.ID)
	}

	imgFs, err := driver.Get(img.ID, "")
	if err != nil {
		return nil, err
	}
	defer func() {
		// Release the mount only on error; on success the returned
		// ReadCloser's Close callback releases it instead.
		if err != nil {
			driver.Put(img.ID)
		}
	}()

	// Base layer (no parent): tar the whole filesystem.
	if img.Parent == "" {
		fs, err := archive.Tar(imgFs, archive.Uncompressed)
		if err != nil {
			return nil, err
		}
		return utils.NewReadCloserWrapper(fs, func() error {
			err := fs.Close()
			driver.Put(img.ID)
			return err
		}), nil
	}

	// Child layer: export only the changes relative to the parent.
	parentFs, err := driver.Get(img.Parent, "")
	if err != nil {
		return nil, err
	}
	defer driver.Put(img.Parent)

	changes, err := archive.ChangesDirs(imgFs, parentFs)
	if err != nil {
		return nil, err
	}
	fs, err := archive.ExportChanges(imgFs, changes)
	if err != nil {
		return nil, err
	}
	return utils.NewReadCloserWrapper(fs, func() error {
		err := fs.Close()
		driver.Put(img.ID)
		return err
	}), nil
}
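// Illustrative usage sketch, not part of the original source: stream an
// image layer to a file on disk. writeLayerToFile is a hypothetical helper
// and assumes "io" and "os" are imported.
func writeLayerToFile(img *Image, dest string) error {
	layer, err := img.TarLayer()
	if err != nil {
		return err
	}
	// Closing the wrapper also releases the graph driver mount.
	defer layer.Close()

	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(f, layer)
	return err
}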
// Export returns an uncompressed tar archive of the container's root
// filesystem. The container stays mounted until the returned archive
// is closed.
func (container *Container) Export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	fs, err := archive.Tar(container.basefs, archive.Uncompressed)
	if err != nil {
		container.Unmount()
		return nil, err
	}
	return utils.NewReadCloserWrapper(fs, func() error {
		err := fs.Close()
		container.Unmount()
		return err
	}), nil
}
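// Illustrative sketch, not part of the original source: `docker export`-style
// streaming of a container's root filesystem to any io.Writer. The helper
// name is hypothetical.
func copyContainerFs(container *Container, w io.Writer) error {
	fs, err := container.Export()
	if err != nil {
		return err
	}
	// Close also triggers container.Unmount via the wrapper above.
	defer fs.Close()

	_, err = io.Copy(w, fs)
	return err
}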
// CmdImageExport exports all images with the given tag. All versions
// containing the same tag are exported. The resulting output is an
// uncompressed tarball.
// name is the set of tags to export.
// out is the writer the images are written to.
func (s *TagStore) CmdImageExport(job *engine.Job) engine.Status {
	if len(job.Args) != 1 {
		return job.Errorf("Usage: %s IMAGE\n", job.Name)
	}
	name := job.Args[0]

	// Stage the export in a temporary directory before streaming it out.
	tempdir, err := ioutil.TempDir("", "docker-export-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tempdir)

	log.Debugf("Serializing %s", name)

	rootRepoMap := map[string]Repository{}
	rootRepo, err := s.Get(name)
	if err != nil {
		return job.Error(err)
	}
	if rootRepo != nil {
		// This is a base repo name, like 'busybox': export every tagged image.
		for _, id := range rootRepo {
			if err := s.exportImage(job.Eng, id, tempdir); err != nil {
				return job.Error(err)
			}
		}
		rootRepoMap[name] = rootRepo
	} else {
		img, err := s.LookupImage(name)
		if err != nil {
			return job.Error(err)
		}
		if img != nil {
			// This is a named image like 'busybox:latest'.
			repoName, repoTag := parsers.ParseRepositoryTag(name)
			if err := s.exportImage(job.Eng, img.ID, tempdir); err != nil {
				return job.Error(err)
			}
			// Check the length: a lookup by truncated hash has no tag
			// and does not need to be added to this map.
			if len(repoTag) > 0 {
				rootRepoMap[repoName] = Repository{repoTag: img.ID}
			}
		} else {
			// Fall back to treating the name as a raw image ID.
			if err := s.exportImage(job.Eng, name, tempdir); err != nil {
				return job.Error(err)
			}
		}
	}

	// Write the repositories file, if there is something to write.
	if len(rootRepoMap) > 0 {
		rootRepoJson, err := json.Marshal(rootRepoMap)
		if err != nil {
			return job.Error(err)
		}
		if err := ioutil.WriteFile(path.Join(tempdir, "repositories"), rootRepoJson, os.FileMode(0644)); err != nil {
			return job.Error(err)
		}
	} else {
		log.Debugf("There were no repositories to write")
	}

	fs, err := archive.Tar(tempdir, archive.Uncompressed)
	if err != nil {
		return job.Error(err)
	}
	defer fs.Close()

	if _, err := io.Copy(job.Stdout, fs); err != nil {
		return job.Error(err)
	}
	log.Debugf("End Serializing %s", name)
	return engine.StatusOK
}
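// Illustrative sketch, not part of the original source: driving the export
// through the engine, the way `docker save` reaches CmdImageExport. The job
// name "image_export" is an assumption about how the handler is registered.
func saveImageTo(eng *engine.Engine, name string, out io.Writer) error {
	job := eng.Job("image_export", name)
	job.Stdout.Add(out)
	return job.Run()
}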
func (b *buildFile) runContextCommand(args string, allowRemote bool, allowDecompression bool, cmdName string) error {
	if b.context == nil {
		return fmt.Errorf("No context given. Impossible to use %s", cmdName)
	}
	tmp := strings.SplitN(args, " ", 2)
	if len(tmp) != 2 {
		return fmt.Errorf("Invalid %s format", cmdName)
	}

	orig, err := b.ReplaceEnvMatches(strings.Trim(tmp[0], " \t"))
	if err != nil {
		return err
	}
	dest, err := b.ReplaceEnvMatches(strings.Trim(tmp[1], " \t"))
	if err != nil {
		return err
	}

	cmd := b.config.Cmd
	b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, orig, dest)}
	defer func(cmd []string) { b.config.Cmd = cmd }(cmd)
	b.config.Image = b.image

	var (
		origPath   = orig
		destPath   = dest
		remoteHash string
		isRemote   bool
		decompress = true
	)

	isRemote = utils.IsURL(orig)
	if isRemote && !allowRemote {
		return fmt.Errorf("Source can't be a URL for %s", cmdName)
	} else if isRemote {
		// Initiate the download
		resp, err := utils.Download(orig)
		if err != nil {
			return err
		}

		// Create a tmp dir
		tmpDirName, err := ioutil.TempDir(b.contextPath, "docker-remote")
		if err != nil {
			return err
		}

		// Create a tmp file within our tmp dir
		tmpFileName := path.Join(tmpDirName, "tmp")
		tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if err != nil {
			return err
		}
		defer os.RemoveAll(tmpDirName)

		// Download and dump the result to the tmp file
		if _, err := io.Copy(tmpFile, resp.Body); err != nil {
			tmpFile.Close()
			return err
		}
		tmpFile.Close()

		// Reset the mtime of the newly created tmp file so the
		// checksum below is stable across downloads
		if err := system.UtimesNano(tmpFileName, make([]syscall.Timespec, 2)); err != nil {
			return err
		}

		origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))

		// Process the checksum
		r, err := archive.Tar(tmpFileName, archive.Uncompressed)
		if err != nil {
			return err
		}
		tarSum := &tarsum.TarSum{Reader: r, DisableCompression: true}
		if _, err := io.Copy(ioutil.Discard, tarSum); err != nil {
			return err
		}
		remoteHash = tarSum.Sum(nil)
		r.Close()

		// If the destination is a directory, figure out the filename
		// from the URL path.
		if strings.HasSuffix(dest, "/") {
			u, err := url.Parse(orig)
			if err != nil {
				return err
			}
			path := u.Path
			if strings.HasSuffix(path, "/") {
				path = path[:len(path)-1]
			}
			parts := strings.Split(path, "/")
			filename := parts[len(parts)-1]
			if filename == "" {
				return fmt.Errorf("cannot determine filename from url: %s", u)
			}
			destPath = dest + filename
		}
	}

	if err := b.checkPathForAddition(origPath); err != nil {
		return err
	}

	// Hash the source path and check the build cache
	if b.utilizeCache {
		var (
			hash string
			sums = b.context.GetSums()
		)

		if remoteHash != "" {
			hash = remoteHash
		} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
			return err
		} else if fi.IsDir() {
			// Directories hash to the sorted concatenation of the
			// sums of every file underneath them.
			var subfiles []string
			for file, sum := range sums {
				absFile := path.Join(b.contextPath, file)
				absOrigPath := path.Join(b.contextPath, origPath)
				if strings.HasPrefix(absFile, absOrigPath) {
					subfiles = append(subfiles, sum)
				}
			}
			sort.Strings(subfiles)
			hasher := sha256.New()
			hasher.Write([]byte(strings.Join(subfiles, ",")))
			hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
		} else {
			if origPath[0] == '/' && len(origPath) > 1 {
				origPath = origPath[1:]
			}
			origPath = strings.TrimPrefix(origPath, "./")
			if h, ok := sums[origPath]; ok {
				hash = "file:" + h
			}
		}
		b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) %s %s in %s", cmdName, hash, dest)}
		hit, err := b.probeCache()
		if err != nil {
			return err
		}
		// If we do not have a hash, never use the cache
		if hit && hash != "" {
			return nil
		}
	}

	// Create the container
	container, _, err := b.daemon.Create(b.config, "")
	if err != nil {
		return err
	}
	b.tmpContainers[container.ID] = struct{}{}

	if err := container.Mount(); err != nil {
		return err
	}
	defer container.Unmount()

	if !allowDecompression || isRemote {
		decompress = false
	}
	if err := b.addContext(container, origPath, destPath, decompress); err != nil {
		return err
	}

	if err := b.commit(container.ID, cmd, fmt.Sprintf("%s %s in %s", cmdName, orig, dest)); err != nil {
		return err
	}
	return nil
}
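// Illustrative sketch, not part of the original source: ADD and COPY both
// funnel into runContextCommand and differ only in their flags. ADD allows
// remote URLs and archive decompression, COPY allows neither. The method
// names mirror the builder's dispatch convention and are assumptions here.
func (b *buildFile) CmdAdd(args string) error {
	return b.runContextCommand(args, true, true, "ADD")
}

func (b *buildFile) CmdCopy(args string) error {
	return b.runContextCommand(args, false, false, "COPY")
}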
func (daemon *Daemon) CmdBuild(job *engine.Job) engine.Status {
	if len(job.Args) != 0 {
		return job.Errorf("Usage: %s\n", job.Name)
	}
	var (
		remoteURL      = job.Getenv("remote")
		repoName       = job.Getenv("t")
		suppressOutput = job.GetenvBool("q")
		noCache        = job.GetenvBool("nocache")
		rm             = job.GetenvBool("rm")
		forceRm        = job.GetenvBool("forcerm")
		authConfig     = &registry.AuthConfig{}
		configFile     = &registry.ConfigFile{}
		tag            string
		context        io.ReadCloser
	)
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("configFile", configFile)
	repoName, tag = parsers.ParseRepositoryTag(repoName)

	if remoteURL == "" {
		// No remote: the build context is streamed in on stdin.
		context = ioutil.NopCloser(job.Stdin)
	} else if utils.IsGIT(remoteURL) {
		// Git remote: clone it and tar up the working tree.
		if !strings.HasPrefix(remoteURL, "git://") {
			remoteURL = "https://" + remoteURL
		}
		root, err := ioutil.TempDir("", "docker-build-git")
		if err != nil {
			return job.Error(err)
		}
		defer os.RemoveAll(root)

		if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
			return job.Errorf("Error trying to use git: %s (%s)", err, output)
		}

		c, err := archive.Tar(root, archive.Uncompressed)
		if err != nil {
			return job.Error(err)
		}
		context = c
	} else if utils.IsURL(remoteURL) {
		// Plain URL: treat the body as a lone Dockerfile and wrap it
		// in a single-file context.
		f, err := utils.Download(remoteURL)
		if err != nil {
			return job.Error(err)
		}
		defer f.Body.Close()
		dockerFile, err := ioutil.ReadAll(f.Body)
		if err != nil {
			return job.Error(err)
		}
		c, err := archive.Generate("Dockerfile", string(dockerFile))
		if err != nil {
			return job.Error(err)
		}
		context = c
	}
	defer context.Close()

	sf := utils.NewStreamFormatter(job.GetenvBool("json"))
	b := NewBuildFile(daemon, daemon.eng,
		&utils.StdoutFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		&utils.StderrFormater{
			Writer:          job.Stdout,
			StreamFormatter: sf,
		},
		!suppressOutput, !noCache, rm, forceRm, job.Stdout, sf, authConfig, configFile)
	id, err := b.Build(context)
	if err != nil {
		return job.Error(err)
	}
	if repoName != "" {
		daemon.Repositories().Set(repoName, tag, id, false)
	}
	return engine.StatusOK
}
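// Illustrative sketch, not part of the original source: submitting a build
// through the engine with a tar context on stdin. The job name "build" and
// the helper are assumptions; the env keys match those read by CmdBuild above.
func buildFromTar(eng *engine.Engine, context io.Reader, repoTag string, out io.Writer) error {
	job := eng.Job("build")
	job.Setenv("t", repoTag)
	job.SetenvBool("rm", true)
	job.Stdin.Add(context)
	job.Stdout.Add(out)
	return job.Run()
}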