func (c *DaemonClient) ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) {
	if c.imageName != "" {
		if len(options.Tags) != 1 || options.Tags[0] != c.imageName {
			return types.ImageBuildResponse{}, fmt.Errorf("expected image %q, got %v", c.imageName, options.Tags)
		}
	}
	if c.contextDir != "" {
		tmp, err := ioutil.TempDir("", "image-build-test")
		if err != nil {
			return types.ImageBuildResponse{}, err
		}
		if err := archive.Untar(context, tmp, nil); err != nil {
			return types.ImageBuildResponse{}, err
		}
		changes, err := archive.ChangesDirs(tmp, c.contextDir)
		if err != nil {
			return types.ImageBuildResponse{}, err
		}
		if len(changes) != c.changes {
			return types.ImageBuildResponse{}, fmt.Errorf("expected %d changes, got %v", c.changes, changes)
		}
		b, err := json.Marshal(c.message)
		if err != nil {
			return types.ImageBuildResponse{}, err
		}
		return types.ImageBuildResponse{
			Body: ioutil.NopCloser(bytes.NewReader(b)),
		}, nil
	}
	return c.NopClient.ImageBuild(ctx, context, options)
}
// getContextFromReader will read the contents of the given reader as either a
// Dockerfile or tar archive to be extracted to a temporary directory used as
// the context directory. Returns the absolute path to the temporary context
// directory, the relative path of the dockerfile in that context directory,
// and a non-nil error on failure.
func getContextFromReader(r io.Reader, dockerfileName string) (absContextDir, relDockerfile string, err error) {
	buf := bufio.NewReader(r)

	magic, err := buf.Peek(archive.HeaderSize)
	if err != nil && err != io.EOF {
		return "", "", fmt.Errorf("failed to peek context header from STDIN: %v", err)
	}

	if absContextDir, err = ioutil.TempDir("", "docker-build-context-"); err != nil {
		return "", "", fmt.Errorf("unable to create temporary context directory: %v", err)
	}

	defer func(d string) {
		if err != nil {
			os.RemoveAll(d)
		}
	}(absContextDir)

	if !archive.IsArchive(magic) { // Input should be read as a Dockerfile.
		// -f option has no meaning when we're reading it from stdin,
		// so just use our default Dockerfile name
		relDockerfile = api.DefaultDockerfileName

		return absContextDir, relDockerfile, writeToFile(buf, filepath.Join(absContextDir, relDockerfile))
	}

	if err := archive.Untar(buf, absContextDir, nil); err != nil {
		return "", "", fmt.Errorf("unable to extract stdin to temporary context directory: %v", err)
	}

	return getDockerfileRelPath(absContextDir, dockerfileName)
}
func checkTarCorrect(tar archive.Archive, expectedFiles, unexpectedFiles []string, t *testing.T) {
	err := archive.Untar(tar, "/tmp/tar", nil)
	asserErrNil(err, t)
	defer os.RemoveAll("/tmp/tar")

	filesShouldExist(true, expectedFiles, "/tmp/tar", t)
	filesShouldExist(false, unexpectedFiles, "/tmp/tar", t)
}
// UntarToDest writes the streamed input tarball to the user destination
func UntarToDest(api *api.ScalewayAPI, sourceStream *io.ReadCloser, destination string) error {
	// destination is a server address + path (scp-like uri)
	if strings.Contains(destination, ":") {
		log.Debugf("Streaming using ssh and untarring remotely")
		serverParts := strings.Split(destination, ":")
		if len(serverParts) != 2 {
			return fmt.Errorf("invalid destination uri, see 'scw cp -h' for usage")
		}

		serverID := api.GetServerID(serverParts[0])

		server, err := api.GetServer(serverID)
		if err != nil {
			return err
		}

		// remoteCommand is executed on the remote server
		// it streams a tarball raw content
		remoteCommand := []string{"tar"}
		remoteCommand = append(remoteCommand, "-C", serverParts[1])
		if os.Getenv("DEBUG") == "1" {
			remoteCommand = append(remoteCommand, "-v")
		}
		remoteCommand = append(remoteCommand, "-xf", "-")

		// execCmd contains the ssh connection + the remoteCommand
		execCmd := utils.NewSSHExecCmd(server.PublicAddress.IP, false, remoteCommand)
		log.Debugf("Executing: ssh %s", strings.Join(execCmd, " "))

		spawnDst := exec.Command("ssh", execCmd...)
		untarInputStream, err := spawnDst.StdinPipe()
		if err != nil {
			return err
		}
		defer untarInputStream.Close()

		// spawnDst.Stderr = os.Stderr
		// spawnDst.Stdout = os.Stdout

		err = spawnDst.Start()
		if err != nil {
			return err
		}

		_, err = io.Copy(untarInputStream, *sourceStream)
		return err
	}

	// destination is stdout
	if destination == "-" { // stdout
		log.Debugf("Writing sourceStream(%v) to os.Stdout(%v)", sourceStream, os.Stdout)
		_, err := io.Copy(os.Stdout, *sourceStream)
		return err
	}

	// destination is a path on localhost
	log.Debugf("Untarring to local path: %s", destination)
	err := archive.Untar(*sourceStream, destination, &archive.TarOptions{NoLchown: true})
	return err
}
func themePullCommand(ctx *cli.Context) {
	workdir := ctx.GlobalString("workdir")
	if workdir == "" {
		fmt.Println("unknown working directory, please use -w to provide.")
		os.Exit(1)
	}

	// directory
	directory, err := filepath.Abs(workdir)
	if err != nil {
		fmt.Println("workdir:", err)
		return
	}

	stderr := os.Stderr

	if len(ctx.Args()) != 1 {
		fmt.Fprintln(stderr, "please input theme id with format: provider/name:version")
		os.Exit(1)
	}

	provider, name, version, err := parse_theme(ctx.Args()[0])
	if err != nil {
		fmt.Fprintln(stderr, "package format: provider/name:version, please try again")
		os.Exit(1)
	}

	// auth config
	config, err := LoadConfigFile(directory)
	if err != nil {
		fmt.Fprintln(stderr, err.Error())
		os.Exit(1)
	}

	host := ctx.String("Host")
	port := ctx.Int("Port")
	client := NewClient(directory, host, port)

	var pkg Package
	pkg.Provider = provider
	pkg.Name = name
	pkg.Version = version

	if err := client.ThemePull(config.Auth.Token, &pkg); err != nil {
		fmt.Fprintln(stderr, err.Error())
		os.Exit(1)
	}

	bar := pb.New(int(pkg.ArchiveLen)).SetUnits(pb.U_BYTES)
	bar.Prefix(fmt.Sprintf("%s/%s:%s ", pkg.Provider, pkg.Name, pkg.Version))
	bar.Start()

	// wrap the archive reader so the progress bar advances as it is read
	rd := pb.NewPbReader(pkg.ArchiveReader, bar)
	if err := archive.Untar(rd, directory, nil); err != nil {
		fmt.Fprintln(stderr, err.Error())
		os.Exit(1)
	}

	bar.FinishPrint(fmt.Sprintf("%s/%s:%s pulled successfully.", pkg.Provider, pkg.Name, pkg.Version))
}
func untar(value []byte, dirPath string) error {
	return archive.Untar(
		bytes.NewReader(value),
		dirPath,
		&archive.TarOptions{
			NoLchown: true,
		},
	)
}
func copyForExport(docker docker.Docker, v *Volume) (io.Reader, error) {
	bindSpec := v.HostPath + ":/.dockervolume"

	vJson, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return nil, fmt.Errorf("Could not export volume data")
	}
	jsonStr := string(vJson)

	// Since we're using busybox's tar, it does not support appending files.
	// Instead we'll handle adding in Dockerfile/config.json manually
	cmd := fmt.Sprintf(
		"mkdir -p /volumeData && cp -r /.dockervolume /volumeData/data && echo '%s' > /volumeData/Dockerfile && echo '%s' > /volumeData/config.json; cd /volumeData && tar -cf volume.tar .",
		ExportDockerfile,
		jsonStr,
	)

	containerConfig := map[string]interface{}{
		"Image": "busybox",
		"Cmd":   []string{"/bin/sh", "-c", cmd},
		"Volumes": map[string]struct{}{
			"/.dockervolume": struct{}{},
		},
		"HostConfig": map[string]interface{}{
			"Binds": []string{bindSpec},
		},
	}

	containerId, err := docker.RunContainer(containerConfig)
	if err != nil {
		return nil, fmt.Errorf("%s - %s", containerId, err)
	}
	defer docker.RemoveContainer(containerId, true, true)

	// Wait for the container to exit, signaling that our archive is ready
	if err := docker.ContainerWait(containerId); err != nil {
		return nil, fmt.Errorf("Could not get archive: %s", err)
	}

	// This is a tar of a tar; we only want the inner tar, so do some more work
	tmpArch, err := docker.Copy(containerId, "/volumeData/volume.tar")
	if err != nil {
		return nil, fmt.Errorf("Could not get archive: %s", err)
	}

	id := GenerateRandomID()
	tmpDir, err := ioutil.TempDir("", id)
	if err != nil {
		return nil, fmt.Errorf("Could not create temp dir: %s", err)
	}
	defer os.RemoveAll(tmpDir)

	// extract the tar to a temp dir so we can get at the inner tar
	if err := archive.Untar(tmpArch, tmpDir, &archive.TarOptions{Compression: archive.Uncompressed, NoLchown: true}); err != nil {
		return nil, fmt.Errorf("Could not untar archive: %s", err)
	}

	// Get the inner tar and return it to the caller
	return os.Open(tmpDir + "/volume.tar")
}
func (c *tarArchiver) Decompress(reader io.Reader, dirPath string) error {
	return archive.Untar(
		reader,
		dirPath,
		&archive.TarOptions{
			NoLchown: true,
		},
	)
}
func ExtractTarGz(in io.Reader, dest string, uid int, gid int) (err error) {
	return archive.Untar(in, dest, &archive.TarOptions{
		Compression: archive.Gzip,
		NoLchown:    false,
		ChownOpts: &archive.TarChownOptions{
			UID: uid,
			GID: gid,
		},
		ExcludePatterns: []string{"dev/"}, // prevent operation not permitted
	})
}
func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) {
	tmpdir, err = ioutil.TempDir("", "go-dockerclient-test")
	if err != nil {
		return
	}
	err = archive.Untar(req, tmpdir, &archive.TarOptions{
		Compression: archive.Uncompressed,
		NoLchown:    true,
	})
	return
}
func ExtractTar(in io.Reader, dest string) (err error) {
	//func ExtractTarGz(in io.Reader, dest string, uid int, gid int) (err error) {
	//	ChownOpts: &archive.TarChownOptions{
	//		UID: uid,
	//		GID: gid,
	//	},
	return archive.Untar(in, dest, &archive.TarOptions{
		NoLchown:        false,
		ExcludePatterns: []string{"dev/"}, // prevent operation not permitted
	})
}
// Restore restores the contents of dir from the cache
func (c Cache) Restore(dir, task string) {
	path := c.path(dir, task)
	cacheReader, err := c.Store.Reader(path)
	if cacheReader != nil {
		c.log.Info("Restoring cache for", dir, "from", path)
		err = archive.Untar(cacheReader, dir, &archive.TarOptions{})
		if err != nil {
			c.log.Error("Error restoring cache for", dir, "from", path, err.Error())
		}
	} else {
		c.log.Info("No cache for", dir, "to restore")
	}
}
func extractVolConfigJson(imgId string, docker docker.Docker) (string, error) {
	extractVolInfoConfig := map[string]interface{}{
		"Image": imgId,
		"Cmd":   []string{"/bin/sh", "-c", "true"},
	}

	cid1, err := docker.RunContainer(extractVolInfoConfig)
	if err != nil {
		return "", fmt.Errorf("Could not extract volume config: %s", err)
	}
	defer docker.RemoveContainer(cid1, true, true)
	docker.ContainerWait(cid1)

	tmpArch, err := docker.Copy(cid1, "/.volData/config.json")
	if err != nil {
		return "", fmt.Errorf("Could not extract volume config: %s", err)
	}

	// Setup tmp dir for extracting the downloaded archive
	id := GenerateRandomID()
	tmpDir, err := ioutil.TempDir("", id)
	if err != nil {
		return "", fmt.Errorf("Could not create temp dir: %s", err)
	}
	defer os.RemoveAll(tmpDir)

	// extract the tar to a temp dir so we can get at the inner config.json
	if err := archive.Untar(tmpArch, tmpDir, &archive.TarOptions{Compression: archive.Uncompressed, NoLchown: true}); err != nil {
		return "", fmt.Errorf("Could not untar archive: %s", err)
	}

	// Read the extracted config.json and decode the volume config
	configFile, err := os.Open(tmpDir + "/config.json")
	if err != nil {
		return "", fmt.Errorf("Could not open config.json: %s", err)
	}

	var volConfig Volume
	if err := json.NewDecoder(configFile).Decode(&volConfig); err != nil {
		return "", fmt.Errorf("Could not read config.json: %s", err)
	}

	return volConfig.VolPath, nil
}
// CmdCp copies files/folders from a path on the container to a directory on the host running the command.
//
// If HOSTDIR is '-', the data is written as a tar file to STDOUT.
//
// Usage: docker cp CONTAINER:PATH HOSTDIR
func (cli *DockerCli) CmdCp(args ...string) error {
	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data\nas a tar file to STDOUT.", true)
	cmd.Require(flag.Exact, 2)
	cmd.ParseFlags(args, true)

	var copyData engine.Env
	info := strings.Split(cmd.Arg(0), ":")

	if len(info) != 2 {
		return fmt.Errorf("Error: Path not specified")
	}

	copyData.Set("Resource", info[1])
	copyData.Set("HostPath", cmd.Arg(1))

	stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, nil)
	if stream != nil {
		defer stream.Close()
	}
	if statusCode == 404 {
		return fmt.Errorf("No such container: %v", info[0])
	}
	if err != nil {
		return err
	}

	if statusCode == 200 {
		dest := copyData.Get("HostPath")
		if dest == "-" {
			_, err = io.Copy(cli.out, stream)
		} else {
			err = archive.Untar(stream, dest, &archive.TarOptions{NoLchown: true})
		}
		if err != nil {
			return err
		}
	}

	return nil
}
func (b *Builder) readContext(context io.Reader) error {
	tmpdirPath, err := ioutil.TempDir("", "docker-build")
	if err != nil {
		return err
	}

	decompressedStream, err := archive.DecompressStream(context)
	if err != nil {
		return err
	}

	if b.context, err = tarsum.NewTarSum(decompressedStream, true, tarsum.Version0); err != nil {
		return err
	}

	if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
		return err
	}

	b.contextPath = tmpdirPath
	return nil
}
// CmdCp copies files/folders from a path on the container to a directory on the host running the command.
//
// If HOSTDIR is '-', the data is written as a tar file to STDOUT.
//
// Usage: docker cp CONTAINER:PATH HOSTDIR
func (cli *DockerCli) CmdCp(args ...string) error {
	cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTDIR|-", "Copy files/folders from a PATH on the container to a HOSTDIR on the host\nrunning the command. Use '-' to write the data as a tar file to STDOUT.", true)
	cmd.Require(flag.Exact, 2)
	cmd.ParseFlags(args, true)

	// deal with path name with `:`
	info := strings.SplitN(cmd.Arg(0), ":", 2)

	if len(info) != 2 {
		return fmt.Errorf("Error: Path not specified")
	}

	cfg := &types.CopyConfig{
		Resource: info[1],
	}

	stream, _, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", cfg, nil)
	if stream != nil {
		defer stream.Close()
	}
	if statusCode == 404 {
		return fmt.Errorf("No such container: %v", info[0])
	}
	if err != nil {
		return err
	}

	hostPath := cmd.Arg(1)
	if statusCode == 200 {
		if hostPath == "-" {
			_, err = io.Copy(cli.out, stream)
		} else {
			err = archive.Untar(stream, hostPath, &archive.TarOptions{NoLchown: true})
		}
		if err != nil {
			return err
		}
	}

	return nil
}
func unpackRootfs(spec *specs.Spec) error {
	data, err := base64.StdEncoding.DecodeString(DATA)
	if err != nil {
		return err
	}

	if err := os.MkdirAll(defaultRootfsDir, 0755); err != nil {
		return err
	}

	r := bytes.NewReader(data)
	if err := archive.Untar(r, defaultRootfsDir, nil); err != nil {
		return err
	}

	// write a resolv.conf
	if err := ioutil.WriteFile(filepath.Join(defaultRootfsDir, "etc", "resolv.conf"), []byte("nameserver 8.8.8.8\nnameserver 8.8.4.4"), 0755); err != nil {
		return err
	}

	return nil
}
	Expect(err).NotTo(HaveOccurred())

	_, err = fetcher.Fetch(&url.URL{Path: dirPath}, 0)
	Expect(err).NotTo(HaveOccurred())

	Expect(registeredImage).NotTo(BeNil())
	Expect(registeredImage.ID).To(HaveSuffix("foo_bar_baz"))
})

It("registers the image with the correct layer data", func() {
	fakeCake.RegisterStub = func(image *image.Image, layer archive.ArchiveReader) error {
		tmp, err := ioutil.TempDir("", "")
		Expect(err).NotTo(HaveOccurred())
		defer os.RemoveAll(tmp)

		Expect(archive.Untar(layer, tmp, nil)).To(Succeed())
		Expect(path.Join(tmp, "a", "test", "file")).To(BeAnExistingFile())

		return nil
	}

	tmp, err := ioutil.TempDir("", "")
	Expect(err).NotTo(HaveOccurred())
	Expect(os.MkdirAll(path.Join(tmp, "a", "test"), 0700)).To(Succeed())
	Expect(ioutil.WriteFile(path.Join(tmp, "a", "test", "file"), []byte(""), 0700)).To(Succeed())

	_, err = fetcher.Fetch(&url.URL{Path: tmp}, 0)
	Expect(err).NotTo(HaveOccurred())
})
func Untar(in io.Reader, dest string, sameOwner bool) error {
	return archive.Untar(in, dest, &archive.TarOptions{
		NoLchown:        !sameOwner,
		ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
	})
}
// Create the image directory, create a temp vmdk in this directory,
// attach/mount the disk, unpack the tar, check the checksum. If the data
// doesn't match the expected checksum, abort by nuking the image directory.
// If everything matches, move the tmp vmdk to ID.vmdk. The unwind path is a
// bit convoluted here; we need to clean up on the way out in the error case.
func (v *ImageStore) writeImage(ctx context.Context, storeName, parentID, ID string, meta map[string][]byte, sum string, r io.Reader) error {
	// Create a temp image directory in the store.
	imageDir := v.imageDirPath(storeName, ID)
	_, err := v.ds.Mkdir(ctx, true, imageDir)
	if err != nil {
		return err
	}

	// Write the metadata to the datastore
	metaDataDir := v.imageMetadataDirPath(storeName, ID)
	err = writeMetadata(ctx, v.ds, metaDataDir, meta)
	if err != nil {
		return err
	}

	// datastore path to the parent
	parentDiskDsURI := v.imageDiskPath(storeName, parentID)

	// datastore path to the disk we're creating
	diskDsURI := v.imageDiskPath(storeName, ID)
	log.Infof("Creating image %s (%s)", ID, diskDsURI)

	// Create the disk
	vmdisk, err := v.dm.CreateAndAttach(ctx, diskDsURI, parentDiskDsURI, 0, os.O_RDWR)
	if err != nil {
		return err
	}
	defer func() {
		var cleanup bool
		if vmdisk.Mounted() {
			cleanup = true
			log.Debugf("Unmounting abandoned disk")
			vmdisk.Unmount()
		}

		if vmdisk.Attached() {
			cleanup = true
			log.Debugf("Detaching abandoned disk")
			v.dm.Detach(ctx, vmdisk)
		}

		if cleanup {
			v.ds.Rm(ctx, imageDir)
		}
	}()

	// tmp dir to mount the disk
	dir, err := ioutil.TempDir("", "mnt-"+ID)
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)

	if err := vmdisk.Mount(dir, nil); err != nil {
		return err
	}

	h := sha256.New()
	t := io.TeeReader(r, h)

	// Untar the archive
	if err = archive.Untar(t, dir, &archive.TarOptions{}); err != nil {
		return err
	}

	actualSum := fmt.Sprintf("sha256:%x", h.Sum(nil))
	if actualSum != sum {
		return fmt.Errorf("Failed to validate image checksum. Expected %s, got %s", sum, actualSum)
	}

	if err = vmdisk.Unmount(); err != nil {
		return err
	}

	if err = v.dm.Detach(ctx, vmdisk); err != nil {
		return err
	}

	// Write our own bookkeeping manifest file to the image's directory. We
	// treat the manifest file like a done file. Its existence means this vmdk
	// is consistent.
	if err = v.writeManifest(ctx, storeName, ID, nil); err != nil {
		return err
	}

	return nil
}
// UntarToDest writes the streamed input tarball to the user destination
func UntarToDest(ctx CommandContext, sourceStream *io.ReadCloser, destination string, gateway string) error {
	// destination is a server address + path (scp-like uri)
	if strings.Contains(destination, ":") {
		logrus.Debugf("Streaming using ssh and untarring remotely")
		serverParts := strings.Split(destination, ":")
		if len(serverParts) != 2 {
			return fmt.Errorf("invalid destination uri, see 'scw cp -h' for usage")
		}

		serverID, err := ctx.API.GetServerID(serverParts[0])
		if err != nil {
			return err
		}

		server, err := ctx.API.GetServer(serverID)
		if err != nil {
			return err
		}

		// remoteCommand is executed on the remote server
		// it streams a tarball raw content
		remoteCommand := []string{"tar"}
		remoteCommand = append(remoteCommand, "-C", serverParts[1])
		if ctx.Getenv("DEBUG") == "1" {
			remoteCommand = append(remoteCommand, "-v")
		}
		remoteCommand = append(remoteCommand, "-xf", "-")

		// Resolve gateway
		if gateway == "" {
			gateway = ctx.Getenv("SCW_GATEWAY")
		}
		if gateway == serverID || gateway == serverParts[0] {
			gateway = ""
		} else {
			gateway, err = api.ResolveGateway(ctx.API, gateway)
			if err != nil {
				return fmt.Errorf("cannot resolve Gateway '%s': %v", gateway, err)
			}
		}

		// sshCommand contains the ssh connection + the remoteCommand
		sshCommand := utils.NewSSHExecCmd(server.PublicAddress.IP, server.PrivateIP, false, remoteCommand, gateway)
		logrus.Debugf("Executing: %s", sshCommand)

		spawnDst := exec.Command("ssh", sshCommand.Slice()[1:]...)
		untarInputStream, err := spawnDst.StdinPipe()
		if err != nil {
			return err
		}
		defer untarInputStream.Close()

		// spawnDst.Stderr = ctx.Stderr
		// spawnDst.Stdout = ctx.Stdout

		err = spawnDst.Start()
		if err != nil {
			return err
		}

		_, err = io.Copy(untarInputStream, *sourceStream)
		return err
	}

	// destination is stdout
	if destination == "-" { // stdout
		logrus.Debugf("Writing sourceStream(%v) to ctx.Stdout(%v)", sourceStream, ctx.Stdout)
		_, err := io.Copy(ctx.Stdout, *sourceStream)
		return err
	}

	// destination is a path on localhost
	logrus.Debugf("Untarring to local path: %s", destination)
	err := archive.Untar(*sourceStream, destination, &archive.TarOptions{NoLchown: true})
	return err
}
func deployPullCommand(ctx *cli.Context) {
	workdir := ctx.GlobalString("workdir")
	if workdir == "" {
		fmt.Println("unknown working directory, please use -w to provide.")
		os.Exit(1)
	}

	// directory
	directory, err := filepath.Abs(workdir)
	if err != nil {
		fmt.Println("workdir:", err)
		return
	}

	stderr := os.Stderr

	h2oconf, err := app.LoadCONFIG(path.Join(directory, "h2object.conf"))
	if err != nil {
		fmt.Fprintln(stderr, err.Error())
		os.Exit(1)
	}
	h2oconf.SetSection("deploy")

	Host := h2oconf.StringDefault("host", "")
	Port := h2oconf.IntDefault("port", 80)
	AppID := h2oconf.StringDefault("appid", "")
	Secret := h2oconf.StringDefault("secret", "")

	client := api.NewClient(Host, Port)
	auth := api.NewAdminAuth(AppID, Secret)

	dirs := ctx.Args()
	if len(dirs) == 0 {
		body, size, err := client.Download(nil, auth, path.Join("/", ".export"))
		if err != nil {
			fmt.Fprintln(stderr, err.Error())
			os.Exit(1)
		}

		bar := pb.New(int(size)).SetUnits(pb.U_BYTES)
		bar.Prefix("/ ")
		bar.Start()

		// wrap the download body so the progress bar advances as it is read
		rd := pb.NewPbReader(body, bar)
		if err := archive.Untar(rd, directory, nil); err != nil {
			fmt.Fprintln(stderr, err.Error())
			os.Exit(1)
		}

		bar.FinishPrint("/ pulled successfully without <h2object.conf> file.")
	} else {
		for _, dir := range dirs {
			if !strings.HasPrefix(dir, "markdowns") &&
				!strings.HasPrefix(dir, "templates") &&
				!strings.HasPrefix(dir, "statics") &&
				!strings.HasPrefix(dir, "storage") &&
				!strings.HasPrefix(dir, "indexes") {
				fmt.Fprintf(stderr, "pull path ignored: %s\n", dir)
				continue
			}

			body, size, err := client.Download(nil, auth, path.Join("/", dir+".export"))
			if err != nil {
				fmt.Fprintln(stderr, err.Error())
				os.Exit(1)
			}

			bar := pb.New(int(size)).SetUnits(pb.U_BYTES)
			bar.Prefix(dir + " ")
			bar.Start()

			// wrap the download body so the progress bar advances as it is read
			rd := pb.NewPbReader(body, bar)
			if err := archive.Untar(rd, path.Join(directory, dir), nil); err != nil {
				fmt.Fprintln(stderr, err.Error())
				os.Exit(1)
			}

			bar.FinishPrint(fmt.Sprintf("%s pulled successfully.", dir))
		}
	}
}
func (a *Driver) applyDiff(id string, diff archive.ArchiveReader) error {
	return archive.Untar(diff, path.Join(a.rootPath(), "diff", id), nil)
}
func (b *Builder) createLayer(ids []string) (*ct.ImageLayer, error) {
	imageID := ids[len(ids)-1]

	layer, err := b.Store.Load(imageID)
	if err != nil {
		return nil, err
	} else if layer != nil {
		return layer, nil
	}

	// apply the docker layer diffs to a temporary directory
	dir, err := ioutil.TempDir("", "docker-layer-")
	if err != nil {
		return nil, err
	}
	defer os.RemoveAll(dir)

	for _, id := range ids {
		diff, err := b.Context.Diff(id, "")
		if err != nil {
			return nil, err
		}
		if err := archive.Untar(diff, dir, &archive.TarOptions{}); err != nil {
			return nil, err
		}

		// convert Docker AUFS whiteouts to overlay whiteouts.
		//
		// See the "whiteouts and opaque directories" section of the
		// OverlayFS documentation for a description of the whiteout
		// file formats:
		// https://www.kernel.org/doc/Documentation/filesystems/overlayfs.txt
		err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			if !info.Mode().IsRegular() {
				return nil
			}

			base := filepath.Base(path)
			dir := filepath.Dir(path)

			if base == archive.WhiteoutOpaqueDir {
				if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
					return err
				}
				return os.Remove(path)
			}

			if !strings.HasPrefix(base, archive.WhiteoutPrefix) {
				return nil
			}

			// replace the file which the AUFS whiteout is hiding
			// with an overlay whiteout file, and remove the AUFS
			// whiteout
			name := filepath.Join(dir, strings.TrimPrefix(base, archive.WhiteoutPrefix))
			if err := os.RemoveAll(name); err != nil {
				return err
			}
			if err := syscall.Mknod(name, syscall.S_IFCHR, 0); err != nil {
				return err
			}
			stat := info.Sys().(*syscall.Stat_t)
			if err := os.Chown(name, int(stat.Uid), int(stat.Gid)); err != nil {
				return err
			}
			return os.Remove(path)
		})
		if err != nil {
			return nil, err
		}
	}

	// create the squashfs layer, with the root dir having 755 permissions
	if err := os.Chmod(dir, 0755); err != nil {
		return nil, err
	}
	path, layer, err := b.mksquashfs(dir)
	if err != nil {
		return nil, err
	}

	return layer, b.Store.Save(imageID, path, layer)
}
// Loads a set of images into the repository. This is the complement of ImageExport.
// The input stream is an uncompressed tar ball containing images and metadata.
func (s *TagStore) CmdLoad(job *engine.Job) engine.Status {
	tmpImageDir, err := ioutil.TempDir("", "docker-import-")
	if err != nil {
		return job.Error(err)
	}
	defer os.RemoveAll(tmpImageDir)

	var (
		repoTarFile = path.Join(tmpImageDir, "repo.tar")
		repoDir     = path.Join(tmpImageDir, "repo")
	)

	tarFile, err := os.Create(repoTarFile)
	if err != nil {
		return job.Error(err)
	}
	if _, err := io.Copy(tarFile, job.Stdin); err != nil {
		return job.Error(err)
	}
	tarFile.Close()

	repoFile, err := os.Open(repoTarFile)
	if err != nil {
		return job.Error(err)
	}
	if err := os.Mkdir(repoDir, os.ModeDir); err != nil {
		return job.Error(err)
	}

	images, err := s.graph.Map()
	if err != nil {
		return job.Error(err)
	}
	excludes := make([]string, len(images))
	i := 0
	for k := range images {
		excludes[i] = k
		i++
	}
	if err := archive.Untar(repoFile, repoDir, &archive.TarOptions{Excludes: excludes}); err != nil {
		return job.Error(err)
	}

	dirs, err := ioutil.ReadDir(repoDir)
	if err != nil {
		return job.Error(err)
	}
	for _, d := range dirs {
		if d.IsDir() {
			if err := s.recursiveLoad(job.Eng, d.Name(), tmpImageDir); err != nil {
				return job.Error(err)
			}
		}
	}

	repositoriesJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", "repositories"))
	if err == nil {
		repositories := map[string]Repository{}
		if err := json.Unmarshal(repositoriesJson, &repositories); err != nil {
			return job.Error(err)
		}

		for imageName, tagMap := range repositories {
			for tag, address := range tagMap {
				if err := s.Set(imageName, tag, address, true); err != nil {
					return job.Error(err)
				}
			}
		}
	} else if !os.IsNotExist(err) {
		return job.Error(err)
	}

	return engine.StatusOK
}