func NewReader(path path.Path, dag mdag.DAGService, dagnode *mdag.Node, compression int) (*Reader, error) {
	reader := &Reader{
		signalChan: make(chan struct{}),
		dag:        dag,
	}

	var err error
	if compression != gzip.NoCompression {
		reader.gzipWriter, err = gzip.NewWriterLevel(&reader.buf, compression)
		if err != nil {
			return nil, err
		}
		reader.writer = tar.NewWriter(reader.gzipWriter)
	} else {
		reader.writer = tar.NewWriter(&reader.buf)
	}

	// writeToBuf will write the data to the buffer, and will signal when there
	// is new data to read
	_, filename := gopath.Split(path.String())
	go reader.writeToBuf(dagnode, filename, 0)

	return reader, nil
}
func MakeTarball(tarname string, fnames []string) error {
	log.Printf("tarring %d entries to %s ...", len(fnames), tarname)

	tarfile, err := os.Create(tarname)
	if err != nil {
		return err
	}
	defer tarfile.Close()

	var tarwriter *tar.Writer
	if strings.HasSuffix(tarname, ".gz") {
		zipper := gzip.NewWriter(tarfile)
		defer zipper.Close()
		tarwriter = tar.NewWriter(zipper)
		/*
			} else if strings.HasSuffix(tarname, ".xz") {
				p := xz.WriterDefaults
				p.DictCap = 1 << 24
				zipper, err := xz.NewWriterParams(tarfile, &p) // xz.NewWriter(tarfile)
				if err != nil {
					return err
				}
				defer zipper.Close()
				tarwriter = tar.NewWriter(zipper)
		*/
	} else {
		tarwriter = tar.NewWriter(tarfile)
	}
	defer tarwriter.Close()

	for _, fname := range fnames {
		realm, ts, good := util.Parse_FName(fname)
		if !good {
			log.Printf("warning: skip ill-named file '%s'", fname)
			continue // skip
		}
		data, err := util.Load(fname)
		if err != nil {
			return err
		}

		hdr := new(tar.Header)
		hdr.Name = util.Make_FName(realm, ts, false)
		hdr.Size = int64(len(data))
		hdr.ModTime = ts
		hdr.Mode = 0644

		err = tarwriter.WriteHeader(hdr)
		if err != nil {
			return err
		}
		log.Printf("tar %d bytes for file %s", hdr.Size, hdr.Name)
		_, err = tarwriter.Write(data)
		if err != nil {
			return err
		}
	}

	log.Printf("%s tarred without errors", tarname)
	return nil
}
func (s *uploadSuite) createArchive(c *gc.C) {
	archive, err := os.Create(s.filename)
	c.Assert(err, jc.ErrorIsNil)
	defer archive.Close()

	compressed := gzip.NewWriter(archive)
	defer compressed.Close()
	tarball := tar.NewWriter(compressed)
	defer tarball.Close()

	var files = []struct{ Name, Body string }{
		{"root.tar", "<state config files>"},
		{"dump/oplog.bson", "<something here>"},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Size: int64(len(file.Body)),
		}
		err := tarball.WriteHeader(hdr)
		c.Assert(err, jc.ErrorIsNil)
		_, err = tarball.Write([]byte(file.Body))
		c.Assert(err, jc.ErrorIsNil)
	}
}
func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
	// Test to make sure we stop people from trying to leave the
	// build context when specifying a symlink as the path to the dockerfile
	buffer := new(bytes.Buffer)
	tw := tar.NewWriter(buffer)
	defer tw.Close()

	if err := tw.WriteHeader(&tar.Header{
		Name:     "Dockerfile",
		Typeflag: tar.TypeSymlink,
		Linkname: "/etc/passwd",
	}); err != nil {
		c.Fatalf("failed to write tar file header: %v", err)
	}
	if err := tw.Close(); err != nil {
		c.Fatalf("failed to close tar archive: %v", err)
	}

	res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
	// Check the error before dereferencing the response.
	c.Assert(err, check.IsNil)
	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)

	out, err := readBody(body)
	if err != nil {
		c.Fatal(err)
	}

	// The reason the error is "Cannot locate specified Dockerfile" is because
	// in the builder, the symlink is resolved within the context, therefore
	// Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
	// a nonexistent file.
	if !strings.Contains(string(out), "Cannot locate specified Dockerfile: Dockerfile") {
		c.Fatalf("Didn't complain about leaving build context: %s", out)
	}
}
// Archive writes the executable files found in the given directory in
// gzipped tar format to w.
func Archive(w io.Writer, dir string) error {
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return err
	}

	gzw := gzip.NewWriter(w)
	defer closeErrorCheck(&err, gzw)

	tarw := tar.NewWriter(gzw)
	defer closeErrorCheck(&err, tarw)

	for _, ent := range entries {
		h := tarHeader(ent)
		logger.Debugf("adding entry: %#v", h)
		// ignore local umask
		if isExecutable(ent) {
			h.Mode = 0755
		} else {
			h.Mode = 0644
		}
		err := tarw.WriteHeader(h)
		if err != nil {
			return err
		}
		fileName := filepath.Join(dir, ent.Name())
		if err := copyFile(tarw, fileName); err != nil {
			return err
		}
	}
	return nil
}
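// The following is a hypothetical usage sketch for Archive above, not part of
// the original source: it assumes Archive is available in the calling package
// (with "os" imported there), and the output path "tools.tgz" and the source
// directory "tools" are placeholders for illustration only.
func exampleArchiveUsage() error {
	out, err := os.Create("tools.tgz")
	if err != nil {
		return err
	}
	defer out.Close()

	// Bundle every executable found in ./tools into a gzipped tarball.
	return Archive(out, "tools")
}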
func (c *PackCmd) Run() error {
	pkgdir := os.ExpandEnv("$GOPATH/pkg")
	c.log.Logv("Package dir: %s", pkgdir)

	files, err := ioutil.ReadDir(pkgdir)
	if err != nil {
		return err
	}

	writer := tar.NewWriter(c.writer)
	defer func() {
		if err := writer.Close(); err != nil {
			panic(err)
		}
	}()

	for _, f := range files {
		if f.IsDir() {
			c.log.Logv("OS/Arch: %s", f.Name())
			if err := c.packLib(writer, f.Name()); err == nil {
				if err = c.packSource(writer, c.Paths); err != nil {
					panic(err)
				}
			} else if os.IsNotExist(err) {
				c.log.Logv("not built")
				continue // it might not be built for this combo
			} else {
				panic(err)
			}
		}
	}
	return nil
}
func (a *Archive) writer() (*tharWriter, error) {
	writer := io.Writer(a.Stream)
	flushers := []flushableWriter{}
	closers := []closeableWriter{}

	if a.Options.GZip {
		if a.Options.GZipLevel > 0 {
			gw, err := gzip.NewWriterLevel(writer, a.Options.GZipLevel)
			if err != nil {
				return nil, err
			}
			flushers = append([]flushableWriter{gw}, flushers...)
			closers = append([]closeableWriter{gw}, closers...)
			writer = gw
		} else {
			writer = gzip.NewWriter(writer)
		}
	}

	tw := tar.NewWriter(writer)
	flushers = append([]flushableWriter{tw}, flushers...)

	return &tharWriter{
		Writer:   tw,
		Flushers: flushers,
		Closers:  closers,
	}, nil
}
// Backup will write a tar archive of any TSM files modified since the passed
// in time to the passed in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first,
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually
// rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
	if err := e.WriteSnapshot(); err != nil {
		return err
	}
	e.FileStore.mu.RLock()
	defer e.FileStore.mu.RUnlock()

	var files []FileStat

	// grab all the files and tombstones that have a modified time after since
	for _, f := range e.FileStore.files {
		if stat := f.Stats(); stat.LastModified > since.UnixNano() {
			files = append(files, f.Stats())
		}
		for _, t := range f.TombstoneFiles() {
			if t.LastModified > since.UnixNano() {
				files = append(files, f.Stats())
			}
		}
	}

	tw := tar.NewWriter(w)
	defer tw.Close()

	for _, f := range files {
		if err := e.writeFileToBackup(f, basePath, tw); err != nil {
			return err
		}
	}

	return nil
}
// newDebugInfo initializes the global debug handler.
func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
	gz := gzip.NewWriter(w)

	d := &debugInfo{
		name: name,
		w:    w,
		gz:   gz,
		tar:  tar.NewWriter(gz),
	}

	// create the subdirs we need
	topHdr := &tar.Header{
		Name:     name,
		Typeflag: tar.TypeDir,
		Mode:     0755,
	}
	graphsHdr := &tar.Header{
		Name:     name + "/graphs",
		Typeflag: tar.TypeDir,
		Mode:     0755,
	}

	err := d.tar.WriteHeader(topHdr)
	// if the first errors, the second will too
	err = d.tar.WriteHeader(graphsHdr)
	if err != nil {
		return nil, err
	}

	return d, nil
}
func newExportAction(c *cli.Context) {
	if len(c.Args()) > 1 {
		fmt.Fprintln(os.Stderr, "At most one host name may be provided.")
		os.Exit(1)
	}

	if _, err := depot.GetCertificateAuthority(d); isFileNotExist(err) {
		fmt.Fprintln(os.Stderr, "Please run 'ca-ctl init' to initialize the depot.")
		os.Exit(1)
	}

	var files []*TarFile
	var err error
	if len(c.Args()) == 0 {
		files, err = getAuthFiles(c)
	} else {
		files, err = getHostFiles(c, c.Args()[0])
	}
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	w := tar.NewWriter(os.Stdout)
	defer w.Close()
	if err = outputTarFiles(w, files); err != nil {
		fmt.Fprintln(os.Stderr, "Save tar error:", err)
		os.Exit(1)
	}
}
func newTestTar(entries []*testTarEntry, dir string) (string, error) {
	t, err := ioutil.TempFile(dir, "tar")
	if err != nil {
		return "", err
	}
	defer t.Close()

	tw := tar.NewWriter(t)
	for _, entry := range entries {
		// Add default mode
		if entry.header.Mode == 0 {
			if entry.header.Typeflag == tar.TypeDir {
				entry.header.Mode = 0755
			} else {
				entry.header.Mode = 0644
			}
		}
		// Add calling user uid and gid or tests will fail
		entry.header.Uid = os.Getuid()
		entry.header.Gid = os.Getgid()

		if err := tw.WriteHeader(entry.header); err != nil {
			return "", err
		}
		if _, err := io.WriteString(tw, entry.contents); err != nil {
			return "", err
		}
	}
	if err := tw.Close(); err != nil {
		return "", err
	}
	return t.Name(), nil
}
// createImageInput creates a tar reader using the given templates as files.
// The given vars are replaced in all the templates encountered.
func createImageInput(tmpls, vars map[string]string) (io.Reader, error) {
	var buf bytes.Buffer
	tarW := stdtar.NewWriter(&buf)
	header := &stdtar.Header{
		Mode: 0644,
	}

	for path, tmpl := range tmpls {
		if vars != nil {
			for key, val := range vars {
				tmpl = strings.Replace(tmpl, "{{"+key+"}}", val, -1)
			}
		}

		header.Name = path
		header.Size = int64(len(tmpl))
		err := tarW.WriteHeader(header)
		if err != nil {
			return nil, err
		}

		_, err = io.Copy(tarW, strings.NewReader(tmpl))
		if err != nil {
			return nil, err
		}
	}

	return &buf, tarW.Close()
}
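// The following is a hypothetical usage sketch for createImageInput above,
// not part of the original source: the "Dockerfile" template and the {{tag}}
// variable are illustrative values only.
func exampleCreateImageInput() (io.Reader, error) {
	tmpls := map[string]string{
		"Dockerfile": "FROM busybox:{{tag}}\n",
	}
	vars := map[string]string{
		"tag": "latest",
	}
	// The returned reader yields a tar stream containing a single Dockerfile
	// whose {{tag}} placeholder has been replaced with "latest".
	return createImageInput(tmpls, vars)
}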
func _tar_compress(call otto.FunctionCall) otto.Value {
	var baseDir string

	source, _ := call.Argument(0).ToString()
	target, _ := call.Argument(1).ToString()

	filename := filepath.Base(source)
	target = filepath.Join(target, fmt.Sprintf("%s.tar", filename))

	tarfile, err := os.Create(target)
	if err != nil {
		jsThrow(call, err)
	}
	defer tarfile.Close()

	tarball := tar.NewWriter(tarfile)
	defer tarball.Close()

	info, err := os.Stat(source)
	if err != nil {
		jsThrow(call, err)
	}
	if info.IsDir() {
		baseDir = filepath.Base(source)
	}

	err = filepath.Walk(source, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		header, err := tar.FileInfoHeader(info, info.Name())
		if err != nil {
			return err
		}
		if baseDir != "" {
			header.Name = filepath.Join(baseDir, strings.TrimPrefix(path, source))
		}
		if err := tarball.WriteHeader(header); err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		file, err := os.Open(path)
		if err != nil {
			return err
		}
		defer file.Close()
		_, err = io.Copy(tarball, file)
		return err
	})
	// Surface walk errors to the caller instead of silently discarding them.
	if err != nil {
		jsThrow(call, err)
	}

	return otto.Value{}
}
func TarGZ(files []ExportFile) ([]byte, error) {
	tarBuffer := new(bytes.Buffer)
	tarWriter := tar.NewWriter(tarBuffer)

	for _, file := range files {
		header := &tar.Header{
			Name:    file.Name,
			Mode:    int64(file.Mode),
			Size:    int64(len(file.Content)),
			ModTime: time.Now(),
		}
		if err := tarWriter.WriteHeader(header); err != nil {
			return nil, err
		}
		if _, err := tarWriter.Write(file.Content); err != nil {
			return nil, err
		}
	}
	if err := tarWriter.Close(); err != nil {
		return nil, err
	}

	zipBuffer := new(bytes.Buffer)
	zipWriter := gzip.NewWriter(zipBuffer)
	if _, err := zipWriter.Write(tarBuffer.Bytes()); err != nil {
		return nil, err
	}
	// Close flushes the remaining gzip data; its error must not be ignored.
	if err := zipWriter.Close(); err != nil {
		return nil, err
	}

	return zipBuffer.Bytes(), nil
}
func targz(ctx *cmd.Context, destination io.Writer, filepaths ...string) error {
	var buf bytes.Buffer
	tarWriter := tar.NewWriter(&buf)

	for _, path := range filepaths {
		if path == ".." {
			fmt.Fprintf(ctx.Stderr, "Warning: skipping %q", path)
			continue
		}
		fi, err := os.Lstat(path)
		if err != nil {
			return err
		}
		if fi.IsDir() {
			if len(filepaths) == 1 && path != "." {
				return singleDir(ctx, destination, path)
			}
			err = addDir(tarWriter, path)
		} else {
			err = addFile(tarWriter, path)
		}
		if err != nil {
			return err
		}
	}

	err := tarWriter.Close()
	if err != nil {
		return err
	}

	gzipWriter := gzip.NewWriter(destination)
	defer gzipWriter.Close()

	_, err = io.Copy(gzipWriter, &buf)
	return err
}
func CreateTar() (buf bytes.Buffer, err error) {
	// Create a new tar archive.
	gw := gzip.NewWriter(&buf)
	defer gw.Close()
	tw := tar.NewWriter(gw)
	defer tw.Close()

	// Add some files to the archive.
	var files = []struct {
		Name, Body string
	}{
		{"readme.txt", "This archive contains some text files."},
		{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
		{"todo.txt", "Get animal handling licence."},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0600,
			Size: int64(len(file.Body)),
		}
		if err = tw.WriteHeader(hdr); err != nil {
			return
		}
		if _, err = tw.Write([]byte(file.Body)); err != nil {
			return
		}
	}
	return
}
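// The following is a hypothetical caller for CreateTar above, not part of the
// original source: it assumes "io/ioutil" is imported in the enclosing file,
// and the output filename "example.tar.gz" is a placeholder.
func exampleCreateTarUsage() error {
	buf, err := CreateTar()
	if err != nil {
		return err
	}
	// Persist the gzipped tarball; 0644 is an illustrative file mode.
	return ioutil.WriteFile("example.tar.gz", buf.Bytes(), 0644)
}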
func generateArchive(t *testing.T, files archiveBuilder) *bytes.Reader {
	buffer := new(bytes.Buffer)
	tw := tar.NewWriter(buffer)
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Mode: 0600,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			t.Fatal(err)
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			t.Fatal(err)
		}
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}

	r := bytes.NewReader(buffer.Bytes())
	return r
}
// file names in filelist look like this: './source/file'
func TarFilelist(filelist []string, case_dir string, object_name string) (tar_url string) {
	tar_url = path.Join(case_dir, object_name) + ".tar.gz"
	fw, err := os.Create(tar_url)
	if err != nil {
		fmt.Println("Failed to create tar file ", err)
		return tar_url
	}
	defer fw.Close()

	gw := gzip.NewWriter(fw)
	defer gw.Close()

	tw := tar.NewWriter(gw)
	defer tw.Close()

	for index := 0; index < len(filelist); index++ {
		source_file := filelist[index]
		fi, err := os.Stat(path.Join(case_dir, source_file))
		if err != nil {
			fmt.Println(err)
			continue
		}
		fr, err := os.Open(path.Join(case_dir, source_file))
		if err != nil {
			fmt.Println(err)
			continue
		}

		h := new(tar.Header)
		h.Name = source_file
		h.Size = fi.Size()
		h.Mode = int64(fi.Mode())
		h.ModTime = fi.ModTime()

		if err = tw.WriteHeader(h); err != nil {
			fmt.Println(err)
			fr.Close()
			continue
		}
		if _, err = io.Copy(tw, fr); err != nil {
			fmt.Println(err)
		}
		fr.Close()
	}
	return tar_url
}
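// The following is a hypothetical usage sketch for TarFilelist above, not part
// of the original source: the case directory, file list, and object name are
// placeholders chosen to match the './source/file' convention noted above.
func exampleTarFilelistUsage() {
	tarURL := TarFilelist(
		[]string{"./source/config.json", "./source/report.txt"},
		"/tmp/case001",
		"bundle",
	)
	fmt.Println("wrote", tarURL)
}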
func entar(w io.Writer, dir string, ww io.Writer) error {
	msg.Write(ww, msg.User, []byte("entar\n"))
	tw := tar.NewWriter(w)
	err := filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		hdr := new(tar.Header)
		hdr.Name = "./app" + path[len(dir):]
		hdr.Mode = int64(fi.Mode() & os.ModePerm)
		if fi.IsDir() {
			hdr.Typeflag = tar.TypeDir
		} else {
			hdr.Typeflag = tar.TypeReg
			hdr.Size = fi.Size()
		}
		if err = tw.WriteHeader(hdr); err != nil {
			return err
		}
		if !fi.IsDir() {
			var f *os.File
			f, err = os.Open(path)
			if err != nil {
				return err
			}
			_, err = io.Copy(tw, f)
			// Close the file before moving on so descriptors don't leak
			// across the walk.
			f.Close()
		}
		return err
	})
	if err != nil {
		return err
	}
	return tw.Close()
}
func (ctx *runContext) createTarArchive(fd *os.File, dir string) error {
	tarFd := tar.NewWriter(fd)
	defer tarFd.Close()

	err := filepath.Walk(dir, func(name string, info os.FileInfo, err error) error {
		if err != nil {
			return errgo.Mask(err, errgo.Any)
		}
		if info.IsDir() {
			return nil
		}

		tarHeader, err := tar.FileInfoHeader(info, name)
		if err != nil {
			return fmt.Errorf("fail to build tar header: %v", err)
		}
		err = tarFd.WriteHeader(tarHeader)
		if err != nil {
			return fmt.Errorf("fail to write tar header: %v", err)
		}

		fileFd, err := os.OpenFile(name, os.O_RDONLY, 0600)
		if err != nil {
			return errgo.Mask(err, errgo.Any)
		}
		// Close each file when its walk callback returns to avoid leaking
		// descriptors.
		defer fileFd.Close()

		_, err = stdio.Copy(tarFd, fileFd)
		if err != nil {
			return errgo.Mask(err, errgo.Any)
		}
		return nil
	})
	if err != nil {
		return errgo.Mask(err, errgo.Any)
	}
	return nil
}
func newTestACI(usedotslash bool) (*os.File, error) {
	tf, err := ioutil.TempFile("", "")
	if err != nil {
		return nil, err
	}

	manifestBody := `{"acKind":"ImageManifest","acVersion":"0.8.1","name":"example.com/app"}`

	gw := gzip.NewWriter(tf)
	tw := tar.NewWriter(gw)

	manifestPath := "manifest"
	if usedotslash {
		manifestPath = "./" + manifestPath
	}
	hdr := &tar.Header{
		Name: manifestPath,
		Size: int64(len(manifestBody)),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return nil, err
	}
	if _, err := tw.Write([]byte(manifestBody)); err != nil {
		return nil, err
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	if err := gw.Close(); err != nil {
		return nil, err
	}
	return tf, nil
}
func (cmd *Builder) writeACI() (string, error) {
	mode := os.O_CREATE | os.O_WRONLY | os.O_TRUNC
	filename, err := cmd.custom.GetImageFileName()
	if err != nil {
		return "", err
	}
	of, err := os.OpenFile(filename, mode, 0644)
	if err != nil {
		return "", fmt.Errorf("Error opening output file: %v", err)
	}
	defer of.Close()

	gw := gzip.NewWriter(of)
	defer gw.Close()
	tr := tar.NewWriter(gw)
	defer tr.Close()

	// FIXME: the files in the tar archive are added with the
	// wrong uid/gid. The uid/gid of the aci builder leaks in the
	// tar archive. See: https://github.com/appc/goaci/issues/16
	iw := aci.NewImageWriter(*cmd.manifest, tr)

	paths := cmd.custom.GetCommonPaths()
	if err := filepath.Walk(paths.AciDir, aci.BuildWalker(paths.AciDir, iw, nil)); err != nil {
		return "", err
	}
	if err := iw.Close(); err != nil {
		return "", err
	}
	return of.Name(), nil
}
func (dg *dockerGoClient) createScratchImageIfNotExists() error {
	client, err := dg.dockerClient()
	if err != nil {
		return err
	}

	scratchCreateLock.Lock()
	defer scratchCreateLock.Unlock()

	_, err = client.InspectImage(emptyvolume.Image + ":" + emptyvolume.Tag)
	if err == nil {
		// Already exists; assume that it's okay to use it
		return nil
	}

	reader, writer := io.Pipe()

	emptytarball := tar.NewWriter(writer)
	go func() {
		emptytarball.Close()
		writer.Close()
	}()

	// Create it from an empty tarball
	err = client.ImportImage(docker.ImportImageOptions{
		Repository:  emptyvolume.Image,
		Tag:         emptyvolume.Tag,
		Source:      "-",
		InputStream: reader,
	})
	return err
}
// TarGz implementation of Archiver.
func TarGz(archiveFilename string, itemsToArchive []ArchiveItem) error {
	// file write
	fw, err := os.Create(archiveFilename)
	if err != nil {
		return err
	}
	defer fw.Close()

	// gzip write
	gw := gzip.NewWriter(fw)
	defer gw.Close()

	// tar write
	tw := tar.NewWriter(gw)
	defer tw.Close()

	for _, item := range itemsToArchive {
		err = addItemToTarGz(item, tw)
		if err != nil {
			return err
		}
	}
	err = tw.Close()
	return err
}
func (s *DockerSuite) TestBuildApiDockerfilePath(c *check.C) {
	// Test to make sure we stop people from trying to leave the
	// build context when specifying the path to the dockerfile
	buffer := new(bytes.Buffer)
	tw := tar.NewWriter(buffer)
	defer tw.Close()

	dockerfile := []byte("FROM busybox")
	if err := tw.WriteHeader(&tar.Header{
		Name: "Dockerfile",
		Size: int64(len(dockerfile)),
	}); err != nil {
		c.Fatalf("failed to write tar file header: %v", err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		c.Fatalf("failed to write tar file content: %v", err)
	}
	if err := tw.Close(); err != nil {
		c.Fatalf("failed to close tar archive: %v", err)
	}

	res, body, err := sockRequestRaw("POST", "/build?dockerfile=../Dockerfile", buffer, "application/x-tar")
	// Check the error before dereferencing the response.
	c.Assert(err, check.IsNil)
	c.Assert(res.StatusCode, check.Equals, http.StatusInternalServerError)

	out, err := readBody(body)
	if err != nil {
		c.Fatal(err)
	}

	if !strings.Contains(string(out), "must be within the build context") {
		c.Fatalf("Didn't complain about leaving build context: %s", out)
	}
}
func tarProcfile(t *testing.T) string {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)

	var files = []struct {
		Name, Body string
	}{
		{"Procfile", "web: rails server"},
	}
	for _, file := range files {
		hdr := &tar.Header{
			Name: file.Name,
			Size: int64(len(file.Body)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			t.Fatal(err)
		}
		if _, err := tw.Write([]byte(file.Body)); err != nil {
			t.Fatal(err)
		}
	}
	if err := tw.Close(); err != nil {
		t.Fatal(err)
	}

	return buf.String()
}
func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) {
	repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL)
	emptyTarball, err := ioutil.TempFile("", "empty_tarball")
	if err != nil {
		c.Fatalf("Unable to create test file: %v", err)
	}
	tw := tar.NewWriter(emptyTarball)
	err = tw.Close()
	if err != nil {
		c.Fatalf("Error creating empty tarball: %v", err)
	}
	freader, err := os.Open(emptyTarball.Name())
	if err != nil {
		c.Fatalf("Could not open test tarball: %v", err)
	}

	importCmd := exec.Command(dockerBinary, "import", "-", repoName)
	importCmd.Stdin = freader
	out, _, err := runCommandWithOutput(importCmd)
	if err != nil {
		c.Errorf("import failed with errors: %v, output: %q", err, out)
	}

	// Now verify we can push it
	if out, _, err := dockerCmdWithError("push", repoName); err != nil {
		c.Fatalf("pushing the image to the private registry has failed: %s, %v", out, err)
	}
}
func (client *Client) Build(dockerfile, tag string) error {
	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)

	// Create a new tar archive.
	tw := tar.NewWriter(buf)
	hdr := &tar.Header{
		Name: "Dockerfile",
		Size: int64(len(dockerfile)),
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := tw.Write([]byte(dockerfile)); err != nil {
		return err
	}
	if err := tw.Close(); err != nil {
		return err
	}

	url := fmt.Sprintf("%s/build?t=%s", client.URL, tag)
	resp, err := http.Post(url, "", bytes.NewReader(buf.Bytes()))
	if err != nil {
		return err
	}
	// Close the response body so the underlying connection is not leaked.
	resp.Body.Close()
	return nil
}
func (s *DockerSuite) TestBuildAPIRemoteTarballContext(c *check.C) {
	buffer := new(bytes.Buffer)
	tw := tar.NewWriter(buffer)
	defer tw.Close()

	dockerfile := []byte("FROM busybox")
	err := tw.WriteHeader(&tar.Header{
		Name: "Dockerfile",
		Size: int64(len(dockerfile)),
	})
	// failed to write tar file header
	c.Assert(err, checker.IsNil)

	_, err = tw.Write(dockerfile)
	// failed to write tar file content
	c.Assert(err, checker.IsNil)

	// failed to close tar archive
	c.Assert(tw.Close(), checker.IsNil)

	server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
		"testT.tar": buffer,
	})
	c.Assert(err, checker.IsNil)

	defer server.Close()

	res, b, err := request.SockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar", daemonHost())
	c.Assert(err, checker.IsNil)
	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)

	b.Close()
}
func (s *DockerSuite) TestBuildApiRemoteTarballContext(c *check.C) {
	buffer := new(bytes.Buffer)
	tw := tar.NewWriter(buffer)
	defer tw.Close()

	dockerfile := []byte("FROM busybox")
	if err := tw.WriteHeader(&tar.Header{
		Name: "Dockerfile",
		Size: int64(len(dockerfile)),
	}); err != nil {
		c.Fatalf("failed to write tar file header: %v", err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		c.Fatalf("failed to write tar file content: %v", err)
	}
	if err := tw.Close(); err != nil {
		c.Fatalf("failed to close tar archive: %v", err)
	}

	server, err := fakeBinaryStorage(map[string]*bytes.Buffer{
		"testT.tar": buffer,
	})
	c.Assert(err, check.IsNil)
	defer server.Close()

	res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar")
	c.Assert(err, check.IsNil)
	c.Assert(res.StatusCode, check.Equals, http.StatusOK)
	b.Close()
}