func getTarFileBytes(file *os.File, path string) ([]byte, error) { _, err := file.Seek(0, 0) if err != nil { fmt.Errorf("error seeking file: %v", err) } var fileBytes []byte fileWalker := func(t *tarball.TarFile) error { if filepath.Clean(t.Name()) == path { fileBytes, err = ioutil.ReadAll(t.TarStream) if err != nil { return err } } return nil } tr := tar.NewReader(file) if err := tarball.Walk(*tr, fileWalker); err != nil { return nil, err } if fileBytes == nil { return nil, fmt.Errorf("file %q not found", path) } return fileBytes, nil }
// readLayer scans the tar stream in r for the image layer directory "<id>/"
// and returns the raw contents of its json and VERSION files together with a
// tar.Reader positioned over the embedded layer.tar. The stream is rewound
// before scanning; if the layer directory is never seen, a "layer not found"
// error is returned.
//
// NOTE(review): this relies on VERSION, json, and layer.tar following the
// directory entry in that order (readFile/nextFile presumably enforce the
// expected names) — confirm against the archive producer.
func readLayer(r io.ReadSeeker, id string) (json, version []byte, layer *tar.Reader, err error) {
	// Start from the beginning of the stream.
	if _, err = r.Seek(0, 0); err != nil {
		return
	}
	tr := tar.NewReader(r)
	for {
		var h *tar.Header
		h, err = tr.Next()
		if err == io.EOF {
			// End of archive without seeing the layer directory.
			return nil, nil, nil, fmt.Errorf("layer not found")
		}
		if err != nil {
			return
		}
		// Skip everything until the directory entry for this layer id.
		if h.Typeflag != tar.TypeDir || h.Name != id+"/" {
			continue
		}
		if version, err = readFile(tr, id+"/VERSION"); err != nil {
			return
		}
		if json, err = readFile(tr, id+"/json"); err != nil {
			return
		}
		// Deliberately shadows the outer r: this reader yields the bytes of
		// the embedded layer.tar entry.
		var r io.Reader
		r, err = nextFile(tr, id+"/layer.tar")
		if err != nil {
			return
		}
		layer = tar.NewReader(r)
		return
	}
}
// UpdatePixel replaces the filesystem of an existing pixel with the tar
// archive supplied in the request body, swaps its container for a fresh one,
// and resets its last rendered image to the shared black pixel.
func (pa *PixelApi) UpdatePixel(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()
	// The routing layer stashes the *Pixel in the response writer's vars.
	pixel := w.(httptools.VarsResponseWriter).Vars()["pixel"].(*Pixel)
	buf := &bytes.Buffer{}
	// NOTE(review): the io.Copy error is ignored, so a truncated upload is
	// indistinguishable from a complete one — confirm this is acceptable.
	io.Copy(buf, r.Body)
	if buf.Len() <= 0 {
		http.Error(w, "Empty fs", http.StatusBadRequest)
		return
	}
	// Create the new container before stopping the old one, so a failure
	// leaves the pixel running on its previous filesystem.
	ctr, err := pa.cc.CreateContainer(tar.NewReader(bytes.NewReader(buf.Bytes())), nil)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	StopContainer(pixel.Container)
	pixel.Container = ctr
	// A second tar.Reader is required because the first one was consumed by
	// CreateContainer above.
	pixel.Filesystem = fsObject(tar.NewReader(bytes.NewReader(buf.Bytes())))
	pixel.LastImage = &bytes.Buffer{}
	pixel.Broken = false
	io.Copy(pixel.LastImage, bytes.NewReader(blackPixel.Bytes()))
	pa.Messages <- &Message{
		Pixel: pixel.Id,
		Type:  TypeChange,
	}
	go pa.pixelListener(pixel)
	// http.Error doubles here as a plain-text writer for the 201 response.
	http.Error(w, pixel.Id, http.StatusCreated)
}
// DetectArchiveCompression takes a source reader and will determine the // compression type to use, if any. It will return a *tar.Reader that can be // used to read the archive. func DetectArchiveCompression(r io.Reader) (*tar.Reader, error) { var comp Decompressor // setup a buffered reader br := bufio.NewReader(r) // loop over the registered decompressors to find the right one for _, c := range decompressorTypes { if c.Detect(br) { comp = c break } } // Create the reader if a compression handler was found, else fall back on // using no compression. if comp != nil { // Create the reader arch, err := comp.NewReader(br) if err != nil { return nil, err } defer func() { if cl, ok := arch.(io.ReadCloser); ok { cl.Close() } }() return tar.NewReader(arch), nil } return tar.NewReader(br), nil }
// TarFileList ... func TarFileList(filename string) ([]string, error) { reader, err := os.Open(filename) if err != nil { return nil, err } defer reader.Close() var tarReader *tar.Reader if strings.HasSuffix(filename, ".gz") || strings.HasSuffix(filename, ".tgz") { gzipReader, err := gzip.NewReader(reader) if err != nil { return nil, err } tarReader = tar.NewReader(gzipReader) } else { tarReader = tar.NewReader(reader) } var files []string for { header, err := tarReader.Next() if err != nil { if err == io.EOF { break } return files, err } if header == nil { break } files = append(files, header.Name) } return files, nil }
func (t *TarInfo) Load(file io.ReadSeeker) { var reader *tar.Reader file.Seek(0, 0) gzipReader, err := gzip.NewReader(file) if err != nil { // likely not a gzip compressed file file.Seek(0, 0) reader = tar.NewReader(file) } else { reader = tar.NewReader(gzipReader) } for { header, err := reader.Next() if err == io.EOF { // end of tar file break } else if err != nil { // error occured logger.Debug("[TarInfoLoad] Error when reading tar stream tarsum. Disabling TarSum, TarFilesInfo. Error: %s", err.Error()) t.Error = TarError(err.Error()) return } t.TarSum.Append(header, reader) t.TarFilesInfo.Append(header) } }
func tarFilesCount(sourcefile string) int { flreader, _ := os.Open(sourcefile) defer flreader.Close() var fltarReader *tar.Reader var flReader io.ReadCloser = flreader if strings.HasSuffix(sourcefile, ".gz") || strings.HasSuffix(sourcefile, ".tgz") { flgzipReader, err := gzip.NewReader(flreader) checkErr("In tarFilesCounter - NewReader", err) fltarReader = tar.NewReader(flgzipReader) defer flReader.Close() } else if strings.HasSuffix(sourcefile, ".bz2") { flbz2Reader := bzip2.NewReader(flreader) fltarReader = tar.NewReader(flbz2Reader) } else { fltarReader = tar.NewReader(flreader) } trfl := fltarReader counter := 0 for { _, err := trfl.Next() if err != nil { if err == io.EOF { break } checkErr("Extract error::ReadTarArchive", err) } counter++ } fmt.Println("Files in archive -", counter) return counter }
// getTarReader returns a TarReaderCloser associated with the specified io.Reader. // // Gzip/Bzip2/XZ detection is done by using the magic numbers: // Gzip: the first two bytes should be 0x1f and 0x8b. Defined in the RFC1952. // Bzip2: the first three bytes should be 0x42, 0x5a and 0x68. No RFC. // XZ: the first three bytes should be 0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00. No RFC. func getTarReader(r io.Reader) (*TarReadCloser, error) { br := bufio.NewReader(r) header, err := br.Peek(readLen) if err == nil { switch { case bytes.HasPrefix(header, gzipHeader): gr, err := gzip.NewReader(br) if err != nil { return nil, err } return &TarReadCloser{tar.NewReader(gr), gr}, nil case bytes.HasPrefix(header, bzip2Header): bzip2r := ioutil.NopCloser(bzip2.NewReader(br)) return &TarReadCloser{tar.NewReader(bzip2r), bzip2r}, nil case bytes.HasPrefix(header, xzHeader): xzr, err := NewXzReader(br) if err != nil { return nil, err } return &TarReadCloser{tar.NewReader(xzr), xzr}, nil } } dr := ioutil.NopCloser(br) return &TarReadCloser{tar.NewReader(dr), dr}, nil }
func extractBz2(body []byte, location string) (string, error) { bodyCopy := make([]byte, len(body)) copy(bodyCopy, body) tarFile := bzip2.NewReader(bytes.NewReader(body)) tarReader := tar.NewReader(tarFile) var dirList []string for { header, err := tarReader.Next() if err == io.EOF { break } dirList = append(dirList, header.Name) } basedir := findBaseDir(dirList) tarFile = bzip2.NewReader(bytes.NewReader(bodyCopy)) tarReader = tar.NewReader(tarFile) for { header, err := tarReader.Next() if err == io.EOF { break } else if err != nil { //return location, err } path := filepath.Join(location, strings.Replace(header.Name, basedir, "", -1)) info := header.FileInfo() if info.IsDir() { if err = os.MkdirAll(path, info.Mode()); err != nil { return location, err } continue } if header.Typeflag == tar.TypeSymlink { err = os.Symlink(header.Linkname, path) continue } file, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, info.Mode()) if err == nil { defer file.Close() } _, err = io.Copy(file, tarReader) if err != nil { //return location, err } } return location, nil }
// Ungzip and untar from source file to destination directory // you need check file exist before you call this function func UnTarGz(srcFilePath string, destDirPath string) error { //fmt.Println("UnTarGzing " + srcFilePath + "...") // Create destination directory os.Mkdir(destDirPath, os.ModePerm) var tr *tar.Reader fr, err := os.Open(srcFilePath) if err != nil { return err } defer fr.Close() if strings.HasSuffix(srcFilePath, ".bz2") { br := bzip2.NewReader(fr) tr = tar.NewReader(br) } else { // Gzip reader gr, err := gzip.NewReader(fr) if err != nil { return err } defer gr.Close() // Tar reader tr = tar.NewReader(gr) } for { hdr, err := tr.Next() if err == io.EOF { // End of tar archive break } //handleError(err) //fmt.Println("UnTarGzing file..." + hdr.Name) // Check if it is diretory or file if hdr.Typeflag != tar.TypeDir { // Get files from archive // Create diretory before create file os.MkdirAll(destDirPath+"/"+path.Dir(hdr.Name), os.ModePerm) // Write data to file fw, _ := os.Create(destDirPath + "/" + hdr.Name) os.Chmod(destDirPath+"/"+hdr.Name, os.FileMode(hdr.Mode)) if err != nil { return err } _, err = io.Copy(fw, tr) if err != nil { return err } } } //fmt.Println("Well done!") return nil }
// Extract unpacks the tar reader that was passed into New(). This is
// broken out from new to give the caller time to set various
// settings in the Untar object.
func (u *Untar) Extract() error {
	// check for detect mode before the main setup, we'll change compression
	// to the intended type and setup a new reader to re-read the header
	switch u.Compression {
	case NONE:
		// Source is already a plain tar stream.
		u.archive = tar.NewReader(u.source)
	case DETECT:
		// Sniff the compression type from the stream's magic bytes.
		arch, err := DetectArchiveCompression(u.source)
		if err != nil {
			return err
		}
		u.archive = arch
	default:
		// Look up the compression handler
		comp, exists := decompressorTypes[string(u.Compression)]
		if !exists {
			return fmt.Errorf("unrecognized decompression type %q", u.Compression)
		}
		// Create the reader
		arch, err := comp.NewReader(u.source)
		if err != nil {
			return err
		}
		// The decompressor is closed when Extract returns; the entry loop
		// below finishes consuming it before that happens.
		defer func() {
			if cl, ok := arch.(io.ReadCloser); ok {
				cl.Close()
			}
		}()
		u.archive = tar.NewReader(arch)
	}
	// Walk every archive entry and hand each header to processEntry.
	for {
		header, err := u.archive.Next()
		if err == io.EOF {
			// EOF, ok, break to return
			break
		}
		if err != nil {
			// See note on logging above.
			return err
		}
		err = u.processEntry(header)
		if err != nil {
			// See note on logging above.
			return err
		}
	}
	return nil
}
func TestPortAssignment(t *testing.T) { buf := makeFs(map[string]interface{}{ "main.go": `package main import ( "fmt" "os" ) func main() { fmt.Printf("127.0.0.1:%s", os.Getenv("PORT")) }`, }) fs := tar.NewReader(bytes.NewReader(buf)) ctr1, err := lcc.CreateContainer(fs, nil) if err != nil { t.Fatalf("Could not start container: %s", err) } go ctr1.Cleanup() timer := time.AfterFunc(1*time.Second, func() { t.Fatalf("Timeout occured") }) ctr1.Wait() timer.Stop() fs = tar.NewReader(bytes.NewReader(buf)) ctr2, err := lcc.CreateContainer(fs, nil) if err != nil { t.Fatalf("Could not start container: %s", err) } go ctr2.Cleanup() timer = time.AfterFunc(1*time.Second, func() { t.Fatalf("Timeout occured") }) ctr2.Wait() timer.Stop() if ctr1.Logs() != ctr1.Address().String() { t.Fatalf("Specified and injected ports differ. Injected %s, got %s", ctr1.Address(), ctr1.Logs()) } if ctr2.Logs() != ctr2.Address().String() { t.Fatalf("Specified and injected ports differ. Injected %s, got %s", ctr2.Address(), ctr2.Logs()) } if ctr1.Logs() == ctr2.Logs() { t.Fatalf("Same port was assigned") } }
// getTarReader returns a tar.Reader associated with the specified io.Reader, // optionally backed by a gzip.Reader if gzip compression is detected. // // Gzip detection is done by using the magic numbers defined in the RFC1952 : // the first two bytes should be 0x1f and 0x8b.. func getTarReader(r io.Reader) (*tar.Reader, error) { br := bufio.NewReader(r) header, err := br.Peek(2) if err == nil && bytes.Equal(header, gzipHeader) { gr, err := gzip.NewReader(br) if err != nil { return nil, err } return tar.NewReader(gr), nil } return tar.NewReader(br), nil }
func ExampleClient_CopyFromContainer() { client, err := docker.NewClient("http://localhost:4243") if err != nil { log.Fatal(err) } cid := "a84849" var buf bytes.Buffer filename := "/tmp/output.txt" err = client.CopyFromContainer(docker.CopyFromContainerOptions{ Container: cid, Resource: filename, OutputStream: &buf, }) if err != nil { log.Fatalf("Error while copying from %s: %s\n", cid, err) } content := new(bytes.Buffer) r := bytes.NewReader(buf.Bytes()) tr := tar.NewReader(r) tr.Next() if err != nil && err != io.EOF { log.Fatal(err) } if _, err := io.Copy(content, tr); err != nil { log.Fatal(err) } log.Println(buf.String()) }
// fetchCamliSrc downloads the camlistore source tree at revision *rev from
// googlesource.com and unpacks it under /gopath/src/camlistore.org.
// Any failure aborts the process via check/log.Fatal.
func fetchCamliSrc() {
	check(os.MkdirAll("/gopath/src/camlistore.org", 0777))
	check(os.Chdir("/gopath/src/camlistore.org"))

	res, err := http.Get("https://camlistore.googlesource.com/camlistore/+archive/" + *rev + ".tar.gz")
	check(err)
	defer res.Body.Close()

	gz, err := gzip.NewReader(res.Body)
	check(err)
	defer gz.Close()
	tr := tar.NewReader(gz)
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		check(err)
		if h.Typeflag == tar.TypeDir {
			// Recreate directories with the mode recorded in the archive.
			check(os.MkdirAll(h.Name, os.FileMode(h.Mode)))
			continue
		}
		f, err := os.Create(h.Name)
		check(err)
		n, err := io.Copy(f, tr)
		if err != nil && err != io.EOF {
			log.Fatal(err)
		}
		// Guard against truncated downloads: bytes written must match the
		// size recorded in the tar header.
		if n != h.Size {
			log.Fatalf("Error when creating %v: wanted %v bytes, got %v bytes", h.Name, h.Size, n)
		}
		check(f.Close())
	}
}
func testArchiveStr(t *testing.T, path string) []string { f, err := os.Open(path) if err != nil { t.Fatalf("err: %s", err) } defer f.Close() // Ungzip gzipR, err := gzip.NewReader(f) if err != nil { t.Fatalf("err: %s", err) } // Accumulator result := make([]string, 0, 10) // Untar tarR := tar.NewReader(gzipR) for { header, err := tarR.Next() if err == io.EOF { break } if err != nil { t.Fatalf("err: %s", err) } result = append(result, header.Name) } sort.Strings(result) return result }
// TestContainerApiCopy exercises POST /containers/{name}/copy: it creates
// /test.txt in a busybox container, requests a copy of that resource, and
// verifies the response body is a tar stream containing "test.txt".
func (s *DockerSuite) TestContainerApiCopy(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "test-container-api-copy"
	dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt")

	postData := types.CopyConfig{
		Resource: "/test.txt",
	}

	status, body, err := sockRequest("POST", "/containers/"+name+"/copy", postData)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusOK)

	found := false
	// The endpoint wraps the file in a tar stream; scan it for the entry.
	for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
		h, err := tarReader.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			c.Fatal(err)
		}
		if h.Name == "test.txt" {
			found = true
			break
		}
	}
	c.Assert(found, check.Equals, true)
}
// ImportTar reads an entire tar stream and converts it into a DAG: each tar
// entry becomes a node carrying the marshaled tar header, with non-empty
// file contents chunked (Rabin fingerprinting) into a child DAG linked as
// "data". Nodes are inserted at the entry's escaped path under a root tagged
// "ipfs/tar", and the finalized root node is returned.
func ImportTar(r io.Reader, ds dag.DAGService) (*dag.Node, error) {
	// NOTE(review): the whole stream is buffered in memory before parsing;
	// large archives will be expensive — confirm callers bound the input.
	rall, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	r = bytes.NewReader(rall)

	tr := tar.NewReader(r)

	root := new(dag.Node)
	root.Data = []byte("ipfs/tar")
	e := dagutil.NewDagEditor(root, ds)

	for {
		h, err := tr.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}

		header := new(dag.Node)

		headerBytes, err := marshalHeader(h)
		if err != nil {
			return nil, err
		}

		header.Data = headerBytes

		if h.Size > 0 {
			// Chunk the file body and hang the resulting DAG off the header
			// node under the "data" link.
			spl := chunk.NewRabin(tr, uint64(chunk.DefaultBlockSize))
			nd, err := importer.BuildDagFromReader(ds, spl)
			if err != nil {
				return nil, err
			}

			err = header.AddNodeLinkClean("data", nd)
			if err != nil {
				return nil, err
			}
		}

		_, err = ds.Add(header)
		if err != nil {
			return nil, err
		}

		path := escapePath(h.Name)
		err = e.InsertNodeAtPath(context.Background(), path, header, func() *dag.Node { return new(dag.Node) })
		if err != nil {
			return nil, err
		}
	}

	return e.Finalize(ds)
}
// main extracts every entry of files/sample.tar.bz2 into the output/
// directory, aborting on the first error.
func main() {
	file, err := os.Open("files/sample.tar.bz2")
	if err != nil {
		log.Fatalln(err)
	}
	defer file.Close()

	tr := tar.NewReader(bzip2.NewReader(file))
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// End of the archive.
			break
		}
		if err != nil {
			log.Fatalln(err)
		}
		// Buffer the entry's contents, then write them out as a file.
		var buf bytes.Buffer
		if _, err = io.Copy(&buf, tr); err != nil {
			log.Fatalln(err)
		}
		if err = ioutil.WriteFile("output/"+hdr.Name, buf.Bytes(), 0755); err != nil {
			log.Fatal(err)
		}
	}
}
func (t *TapeArchive) Open(name string) (io.ReadCloser, int64, error) { f, err := os.Open(t.path) if err != nil { return nil, 0, err } r := tar.NewReader(f) for { h, err := r.Next() if err == io.EOF { break } if err != nil { return nil, 0, err } matched, err := filepath.Match(name, path.Base(h.Name)) if err != nil { return nil, 0, err } if matched { return &TapeArchiveEntry{r, f}, h.Size, nil } } _ = f.Close() return nil, 0, os.ErrNotExist }
func (s *DockerSuite) TestContainerApiGetExport(c *check.C) { name := "exportcontainer" runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "touch", "/test") out, _, err := runCommandWithOutput(runCmd) if err != nil { c.Fatalf("Error on container creation: %v, output: %q", err, out) } status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil) c.Assert(status, check.Equals, http.StatusOK) c.Assert(err, check.IsNil) found := false for tarReader := tar.NewReader(bytes.NewReader(body)); ; { h, err := tarReader.Next() if err != nil { if err == io.EOF { break } c.Fatal(err) } if h.Name == "test" { found = true break } } if !found { c.Fatalf("The created test file has not been found in the exported image") } }
func prepareRepo(cacheKey string) (string, error) { path, err := ioutil.TempDir("", "repo-"+cacheKey) if err != nil { return "", err } res, err := http.Get(blobstoreCacheURL(cacheKey)) if err != nil { return "", err } defer res.Body.Close() if res.StatusCode == 404 { return path, initRepo(path) } if res.StatusCode != 200 { return "", fmt.Errorf("unexpected error %d retrieving cached repo", res.StatusCode) } r := tar.NewReader(res.Body) if err := archiver.Untar(path, r); err != nil { return "", err } if err := writeRepoHook(path); err != nil { return "", err } return path, nil }
func checkToolsContent(c *gc.C, data []byte, uploaded string) { zr, err := gzip.NewReader(bytes.NewReader(data)) c.Check(err, gc.IsNil) defer zr.Close() tr := tar.NewReader(zr) found := false for { hdr, err := tr.Next() if err == io.EOF { break } c.Check(err, gc.IsNil) if strings.ContainsAny(hdr.Name, "/\\") { c.Fail() } if hdr.Typeflag != tar.TypeReg { c.Fail() } content, err := ioutil.ReadAll(tr) c.Check(err, gc.IsNil) c.Check(string(content), gc.Equals, uploaded) found = true } c.Check(found, jc.IsTrue) }
// guiVersion retrieves the GUI version from the juju-gui-* directory included
// in the bz2 archive at the given path.
func guiVersion(path string) (version.Number, error) {
	var number version.Number
	f, err := os.Open(path)
	if err != nil {
		return number, errors.Annotate(err, "cannot open Juju GUI archive")
	}
	defer f.Close()
	// NOTE(review): the doc comment above says "juju-gui-*" but the prefix
	// matched here is "jujugui-" — confirm which naming is correct.
	prefix := "jujugui-"
	r := tar.NewReader(bzip2.NewReader(f))
	for {
		hdr, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return number, errors.New("cannot read Juju GUI archive")
		}
		info := hdr.FileInfo()
		// Only directory entries whose name carries the version prefix
		// are of interest.
		if !info.IsDir() || !strings.HasPrefix(hdr.Name, prefix) {
			continue
		}
		// The version is the directory name with the prefix stripped.
		n := info.Name()[len(prefix):]
		number, err = version.Parse(n)
		if err != nil {
			return number, errors.Errorf("cannot parse version %q", n)
		}
		return number, nil
	}
	return number, errors.New("cannot find Juju GUI version")
}
// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things.
// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work.
func TestUntarUstarGnuConflict(t *testing.T) {
	const want = "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm"
	f, err := os.Open("testdata/broken.tar")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	found := false
	// Walk every entry until the long-named file shows up.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if hdr.Name == want {
			found = true
			break
		}
	}
	if !found {
		t.Fatalf("%s not found in the archive", want)
	}
}
// NewCompressedTarReader creates a new TarReadCloser reading from the // given ACI image. // It is the caller's responsibility to call Close on the TarReadCloser // when done. func NewCompressedTarReader(rs io.ReadSeeker) (*TarReadCloser, error) { cr, err := NewCompressedReader(rs) if err != nil { return nil, err } return &TarReadCloser{tar.NewReader(cr), cr}, nil }
func getParent(file *os.File, imgID string) (string, error) { var parent string _, err := file.Seek(0, 0) if err != nil { return "", fmt.Errorf("error seeking file: %v", err) } jsonPath := filepath.Join(imgID, "json") parentWalker := func(t *tarball.TarFile) error { if filepath.Clean(t.Name()) == jsonPath { jsonb, err := ioutil.ReadAll(t.TarStream) if err != nil { return fmt.Errorf("error reading layer json: %v", err) } var dockerData types.DockerImageData if err := json.Unmarshal(jsonb, &dockerData); err != nil { return fmt.Errorf("error unmarshaling layer data: %v", err) } parent = dockerData.Parent } return nil } tr := tar.NewReader(file) if err := tarball.Walk(*tr, parentWalker); err != nil { return "", err } return parent, nil }
// Download pulls a file out of a container using `docker cp`. We have a source // path and want to write to an io.Writer, not a file. We use - to make docker // cp to write to stdout, and then copy the stream to our destination io.Writer. func (c *Communicator) Download(src string, dst io.Writer) error { log.Printf("Downloading file from container: %s:%s", c.ContainerId, src) localCmd := exec.Command("docker", "cp", fmt.Sprintf("%s:%s", c.ContainerId, src), "-") pipe, err := localCmd.StdoutPipe() if err != nil { return fmt.Errorf("Failed to open pipe: %s", err) } if err = localCmd.Start(); err != nil { return fmt.Errorf("Failed to start download: %s", err) } // When you use - to send docker cp to stdout it is streamed as a tar; this // enables it to work with directories. We don't actually support // directories in Download() but we still need to handle the tar format. archive := tar.NewReader(pipe) _, err = archive.Next() if err != nil { return fmt.Errorf("Failed to read header from tar stream: %s", err) } numBytes, err := io.Copy(dst, archive) if err != nil { return fmt.Errorf("Failed to pipe download: %s", err) } log.Printf("Copied %d bytes for %s", numBytes, src) if err = localCmd.Wait(); err != nil { return fmt.Errorf("Failed to download '%s' from container: %s", src, err) } return nil }
// TestContainerApiGetExport verifies that GET /containers/{name}/export
// returns a tar stream of the container filesystem that contains the /test
// file created by the run command.
func (s *DockerSuite) TestContainerApiGetExport(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "exportcontainer"
	dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test")

	status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil)
	c.Assert(err, check.IsNil)
	c.Assert(status, check.Equals, http.StatusOK)

	found := false
	// Scan the exported tar stream for the file created above.
	for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
		h, err := tarReader.Next()
		if err != nil {
			if err == io.EOF {
				break
			}
			c.Fatal(err)
		}
		if h.Name == "test" {
			found = true
			break
		}
	}
	if !found {
		c.Fatalf("The created test file has not been found in the exported image")
	}
}
// fetchFile fetches the specified directory from the given buildlet, and // writes the first file it finds in that directory to dest. func (b *Build) fetchFile(client *buildlet.Client, dest, dir string) error { b.logf("Downloading file from %q.", dir) tgz, err := client.GetTar(dir) if err != nil { return err } defer tgz.Close() zr, err := gzip.NewReader(tgz) if err != nil { return err } tr := tar.NewReader(zr) for { h, err := tr.Next() if err == io.EOF { return io.ErrUnexpectedEOF } if err != nil { return err } if !h.FileInfo().IsDir() { break } } return b.writeFile(dest, tr) }