Example 1
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
	type diffPathDriver interface {
		DiffPath(string) (string, func() error, error)
	}

	diffDriver, ok := ls.driver.(diffPathDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	defer metadata.Close()

	// get our relative path to the container
	fsPath, releasePath, err := diffDriver.DiffPath(graphID)
	if err != nil {
		return err
	}
	defer releasePath()

	metaUnpacker := storage.NewJSONUnpacker(metadata)
	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
	fileGetter := storage.NewPathFileGetter(fsPath)
	logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
	return asm.WriteOutputTarStream(fileGetter, upackerCounter, w)
}
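For context, the metadata stream that assembleTarTo consumes is the tar-split JSON packing recorded when the layer was originally disassembled; the benchmark and tests further down produce it with asm.NewInputTarStream. The following is a minimal standalone sketch of that producing side, using only the tar-split calls that already appear in these examples. The file names are hypothetical, the github.com/vbatts/tar-split import paths are assumed, and a BufferFileGetPutter stands in for the on-disk payload storage a real caller would pair with NewPathFileGetter.

// Minimal sketch (not the project's code): record tar-split metadata for an
// existing tar stream. Import paths assume the vbatts/tar-split packages used
// at these call sites; "layer.tar" and "tar-data.json.gz" are hypothetical.
package main

import (
	"compress/gzip"
	"io"
	"io/ioutil"
	"os"

	"github.com/vbatts/tar-split/tar/asm"
	"github.com/vbatts/tar-split/tar/storage"
)

func main() {
	in, err := os.Open("layer.tar") // hypothetical input tar
	if err != nil {
		panic(err)
	}
	defer in.Close()

	mf, err := os.Create("tar-data.json.gz") // hypothetical metadata output
	if err != nil {
		panic(err)
	}
	defer mf.Close()
	mfz := gzip.NewWriter(mf)
	defer mfz.Close()

	packer := storage.NewJSONPacker(mfz)
	// BufferFileGetPutter keeps payloads in memory; a real caller would keep
	// the unpacked files on disk and read them back with NewPathFileGetter.
	putter := storage.NewBufferFileGetPutter()

	// Wrap the tar stream so that reading it records per-entry metadata.
	ts, err := asm.NewInputTarStream(in, packer, putter)
	if err != nil {
		panic(err)
	}
	// Drain the stream; the side effect is the packed metadata written above.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
}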
Example 2
func CommandAsm(c *cli.Context) {
	if len(c.Args()) > 0 {
		logrus.Warnf("%d additional arguments passed are ignored", len(c.Args()))
	}
	if len(c.String("input")) == 0 {
		logrus.Fatalf("--input filename must be set")
	}
	if len(c.String("output")) == 0 {
		logrus.Fatalf("--output filename must be set ([FILENAME|-])")
	}
	if len(c.String("path")) == 0 {
		logrus.Fatalf("--path must be set")
	}

	var outputStream io.Writer
	if c.String("output") == "-" {
		outputStream = os.Stdout
	} else {
		fh, err := os.Create(c.String("output"))
		if err != nil {
			logrus.Fatal(err)
		}
		defer fh.Close()
		outputStream = fh
	}

	// Get the tar metadata reader
	mf, err := os.Open(c.String("input"))
	if err != nil {
		logrus.Fatal(err)
	}
	defer mf.Close()
	mfz, err := gzip.NewReader(mf)
	if err != nil {
		logrus.Fatal(err)
	}
	defer mfz.Close()

	metaUnpacker := storage.NewJSONUnpacker(mfz)
	// XXX maybe get the absolute path here
	fileGetter := storage.NewPathFileGetter(c.String("path"))

	ots := asm.NewOutputTarStream(fileGetter, metaUnpacker)
	defer ots.Close()
	i, err := io.Copy(outputStream, ots)
	if err != nil {
		logrus.Fatal(err)
	}

	logrus.Infof("created %s from %s and %s (wrote %d bytes)", c.String("output"), c.String("path"), c.String("input"), i)
}
Example 3
func BenchmarkAsm(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			func() {
				fh, err := os.Open(tc.path)
				if err != nil {
					b.Fatal(err)
				}
				defer fh.Close()
				gzRdr, err := gzip.NewReader(fh)
				if err != nil {
					b.Fatal(err)
				}
				defer gzRdr.Close()

				// Setup where we'll store the metadata
				w := bytes.NewBuffer([]byte{})
				sp := storage.NewJSONPacker(w)
				fgp := storage.NewBufferFileGetPutter()

				// wrap the disassembly stream
				tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
				if err != nil {
					b.Fatal(err)
				}
				// read it all to the bit bucket
				i1, err := io.Copy(ioutil.Discard, tarStream)
				if err != nil {
					b.Fatal(err)
				}

				r := bytes.NewBuffer(w.Bytes())
				sup := storage.NewJSONUnpacker(r)
				// and reuse the fgp that we Put the payloads to.

				rc := NewOutputTarStream(fgp, sup)

				i2, err := io.Copy(ioutil.Discard, rc)
				if err != nil {
					b.Fatal(err)
				}
				if i1 != i2 {
					b.Errorf("%s: input(%d) and ouput(%d) byte count didn't match", tc.path, i1, i2)
				}
			}()
		}
	}
}
Example 4
func (graph *Graph) assembleTarLayer(img *image.Image) (io.ReadCloser, error) {
	root := graph.imageRoot(img.ID)
	mFileName := filepath.Join(root, tarDataFileName)
	mf, err := os.Open(mFileName)
	if err != nil {
		if !os.IsNotExist(err) {
			logrus.Errorf("failed to open %q: %s", mFileName, err)
		}
		return nil, err
	}
	pR, pW := io.Pipe()
	// this will need to be in a goroutine, as we are returning the stream of a
	// tar archive, but can not close the metadata reader early (when this
	// function returns)...
	go func() {
		defer mf.Close()
		// let's reassemble!
		logrus.Debugf("[graph] TarLayer with reassembly: %s", img.ID)
		mfz, err := gzip.NewReader(mf)
		if err != nil {
			pW.CloseWithError(fmt.Errorf("[graph] error with %s:  %s", mFileName, err))
			return
		}
		defer mfz.Close()

		// get our relative path to the container
		fsLayer, err := graph.driver.Get(img.ID, "")
		if err != nil {
			pW.CloseWithError(err)
			return
		}
		defer graph.driver.Put(img.ID)

		metaUnpacker := storage.NewJSONUnpacker(mfz)
		fileGetter := storage.NewPathFileGetter(fsLayer)
		logrus.Debugf("[graph] %s is at %q", img.ID, fsLayer)
		ots := asm.NewOutputTarStream(fileGetter, metaUnpacker)
		defer ots.Close()
		if _, err := io.Copy(pW, ots); err != nil {
			pW.CloseWithError(err)
			return
		}
		pW.Close()
	}()
	return pR, nil
}
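The io.Pipe construction above (and again in assembleTar in Example 7) is the standard Go pattern for returning an io.ReadCloser whose contents are produced asynchronously: the producer goroutine writes into pW and reports failure via CloseWithError, which surfaces the error on the consumer's next Read of pR. A generic sketch of just that pattern, assuming only the io package and a hypothetical produce callback standing in for the reassembly work:

// Sketch of the pipe-producer pattern, not code from the project. produce is
// a hypothetical callback standing in for the work done in the goroutine
// above (gunzip the metadata, mount the layer, copy the tar stream).
func streamFrom(produce func(w io.Writer) error) io.ReadCloser {
	pR, pW := io.Pipe()
	go func() {
		if err := produce(pW); err != nil {
			// CloseWithError makes the reader's next Read return err.
			pW.CloseWithError(err)
			return
		}
		pW.Close()
	}()
	return pR
}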
Example 5
func (c *checksums) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
	defer metadata.Close()

	// get our relative path to the container
	fsPath, release, err := c.driver.Mount(graphID)
	if err != nil {
		return err
	}
	if release != nil {
		defer release()
	}

	metaUnpacker := storage.NewJSONUnpacker(metadata)
	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
	fileGetter := storage.NewPathFileGetter(fsPath)
	logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
	return asm.WriteOutputTarStream(fileGetter, upackerCounter, w)
}
Example 6
func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error {
	diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	defer metadata.Close()

	// get our relative path to the container
	fileGetCloser, err := diffDriver.DiffGetter(graphID)
	if err != nil {
		return err
	}
	defer fileGetCloser.Close()

	metaUnpacker := storage.NewJSONUnpacker(metadata)
	upackerCounter := &unpackSizeCounter{metaUnpacker, size}
	logrus.Debugf("Assembling tar data for %s", graphID)
	return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w)
}
Example 7
func (ls *layerStore) assembleTar(graphID string, metadata io.ReadCloser, size *int64) (io.ReadCloser, error) {
	type diffPathDriver interface {
		DiffPath(string) (string, func() error, error)
	}

	diffDriver, ok := ls.driver.(diffPathDriver)
	if !ok {
		diffDriver = &naiveDiffPathDriver{ls.driver}
	}

	// get our relative path to the container
	fsPath, releasePath, err := diffDriver.DiffPath(graphID)
	if err != nil {
		metadata.Close()
		return nil, err
	}

	pR, pW := io.Pipe()
	// this will need to be in a goroutine, as we are returning the stream of a
	// tar archive, but can not close the metadata reader early (when this
	// function returns)...
	go func() {
		defer releasePath()
		defer metadata.Close()

		metaUnpacker := storage.NewJSONUnpacker(metadata)
		upackerCounter := &unpackSizeCounter{metaUnpacker, size}
		fileGetter := storage.NewPathFileGetter(fsPath)
		logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath)
		ots := asm.NewOutputTarStream(fileGetter, upackerCounter)
		defer ots.Close()
		if _, err := io.Copy(pW, ots); err != nil {
			pW.CloseWithError(err)
			return
		}
		pW.Close()
	}()
	return pR, nil
}
Example 8
func TestTarStream(t *testing.T) {
	var (
		expectedSum        = "1eb237ff69bca6e22789ecb05b45d35ca307adbd"
		expectedSize int64 = 10240
	)

	fh, err := os.Open("./testdata/t.tar.gz")
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	gzRdr, err := gzip.NewReader(fh)
	if err != nil {
		t.Fatal(err)
	}
	defer gzRdr.Close()

	// Setup where we'll store the metadata
	w := bytes.NewBuffer([]byte{})
	sp := storage.NewJSONPacker(w)
	fgp := storage.NewBufferFileGetPutter()

	// wrap the disassembly stream
	tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
	if err != nil {
		t.Fatal(err)
	}

	// get a sum of the stream after it has passed through to ensure it's the same.
	h0 := sha1.New()
	tRdr0 := io.TeeReader(tarStream, h0)

	// read it all to the bit bucket
	i, err := io.Copy(ioutil.Discard, tRdr0)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h0.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of tar: expected %s; got %x", expectedSum, h0.Sum(nil))
	}

	t.Logf("%s", w.String()) // if we fail, then show the packed info

	// If we've made it this far, then we'll turn it around and create a tar
	// stream from the packed metadata and buffered file contents.
	r := bytes.NewBuffer(w.Bytes())
	sup := storage.NewJSONUnpacker(r)
	// and reuse the fgp that we Put the payloads to.

	rc := NewOutputTarStream(fgp, sup)
	h1 := sha1.New()
	i, err = io.Copy(h1, rc)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of output tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h1.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of output tar: expected %s; got %x", expectedSum, h1.Sum(nil))
	}
}
Example 9
func TestTarStream(t *testing.T) {
	testCases := []struct {
		path            string
		expectedSHA1Sum string
		expectedSize    int64
	}{
		{"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
		{"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
		{"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
		{"./testdata/iso-8859.tar.gz", "ddafa51cb03c74ec117ab366ee2240d13bba1ec3", 10240},
	}

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Setup where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		tRdr0 := io.TeeReader(tarStream, h0)

		// read it all to the bit bucket
		i, err := io.Copy(ioutil.Discard, tRdr0)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}
Example 10
func TestTarStream(t *testing.T) {

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Setup where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		i, err := io.Copy(h0, tarStream)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}
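Note that BenchmarkAsm (Example 3) and this last TestTarStream (Example 10) refer to a package-level testCases table that their snippets do not show. Presumably it is the table declared inline in Example 9, hoisted to package scope, roughly:

// Assumed package-level table backing Examples 3 and 10; the entries are
// copied from the inline declaration in Example 9.
var testCases = []struct {
	path            string
	expectedSHA1Sum string
	expectedSize    int64
}{
	{"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
	{"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
	{"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
	{"./testdata/iso-8859.tar.gz", "ddafa51cb03c74ec117ab366ee2240d13bba1ec3", 10240},
}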