Example #1
File: layer_store.go  Project: vmware/vic
func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error {
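	// tee the tar stream through a digester so the layer's DiffID can be
	// computed while the driver consumes the same bytes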
	digester := digest.Canonical.New()
	tr := io.TeeReader(ts, digester.Hash())

	tsw, err := tx.TarSplitWriter(true)
	if err != nil {
		return err
	}
	metaPacker := storage.NewJSONPacker(tsw)
	defer tsw.Close()

	// we're passing nil here for the file putter, because the ApplyDiff will
	// handle the extraction of the archive
	rdr, err := asm.NewInputTarStream(tr, metaPacker, nil)
	if err != nil {
		return err
	}

	applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr))
	if err != nil {
		return err
	}

	// Discard trailing data but ensure metadata is picked up to reconstruct stream
	io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed

	layer.size = applySize
	layer.diffID = DiffID(digester.Digest())

	logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize)

	return nil
}
Example #2
func (c *checksums) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID layer.DiffID, size int64, err error) {
	rawarchive, err := c.driver.TarStream(id, parent)
	if err != nil {
		return
	}
	defer rawarchive.Close()

	f, err := os.Create(newTarDataPath)
	if err != nil {
		return
	}
	defer f.Close()
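	// the tar-split metadata is written gzip-compressed to newTarDataPath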
	mfz := gzip.NewWriter(f)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

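	// wrap the packer so each packed entry's size is tallied into the
	// returned size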
	packerCounter := &packSizeCounter{metaPacker, &size}

	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
	if err != nil {
		return
	}
	dgst, err := digest.FromReader(archive)
	if err != nil {
		return
	}
	diffID = layer.DiffID(dgst)
	return
}
Example #3
func (graph *Graph) disassembleAndApplyTarLayer(id, parent string, layerData archive.ArchiveReader, root string) (size int64, err error) {
	// this is saving the tar-split metadata
	mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		return 0, err
	}
	mfz := gzip.NewWriter(mf)
	metaPacker := storage.NewJSONPacker(mfz)
	defer mf.Close()
	defer mfz.Close()

	inflatedLayerData, err := archive.DecompressStream(layerData)
	if err != nil {
		return 0, err
	}

	// we're passing nil here for the file putter, because the ApplyDiff will
	// handle the extraction of the archive
	rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil)
	if err != nil {
		return 0, err
	}

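	// ApplyDiff consumes rdr, and the packer records the tar-split
	// metadata as the stream passes through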
	if size, err = graph.driver.ApplyDiff(id, parent, archive.ArchiveReader(rdr)); err != nil {
		return 0, err
	}

	return
}
Example #4
func BenchmarkAsm(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
			func() {
				fh, err := os.Open(tc.path)
				if err != nil {
					b.Fatal(err)
				}
				defer fh.Close()
				gzRdr, err := gzip.NewReader(fh)
				if err != nil {
					b.Fatal(err)
				}
				defer gzRdr.Close()

				// Set up where we'll store the metadata
				w := bytes.NewBuffer([]byte{})
				sp := storage.NewJSONPacker(w)
				fgp := storage.NewBufferFileGetPutter()

				// wrap the disassembly stream
				tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
				if err != nil {
					b.Fatal(err)
				}
				// read it all to the bit bucket
				i1, err := io.Copy(ioutil.Discard, tarStream)
				if err != nil {
					b.Fatal(err)
				}

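				// reassemble the original tar from the packed metadata and
				// the buffered file payloads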
				r := bytes.NewBuffer(w.Bytes())
				sup := storage.NewJSONUnpacker(r)
				// and reuse the fgp that we Put the payloads to.

				rc := NewOutputTarStream(fgp, sup)

				i2, err := io.Copy(ioutil.Discard, rc)
				if err != nil {
					b.Fatal(err)
				}
				if i1 != i2 {
					b.Errorf("%s: input(%d) and output(%d) byte count didn't match", tc.path, i1, i2)
				}
			}()
		}
	}
}
Example #5
func CommandDisasm(c *cli.Context) {
	if len(c.Args()) != 1 {
		logrus.Fatalf("please specify tar to be disassembled <NAME|->")
	}
	if len(c.String("output")) == 0 {
		logrus.Fatalf("--output filename must be set")
	}

	// Set up the tar input stream
	var inputStream io.Reader
	if c.Args()[0] == "-" {
		inputStream = os.Stdin
	} else {
		fh, err := os.Open(c.Args()[0])
		if err != nil {
			logrus.Fatal(err)
		}
		defer fh.Close()
		inputStream = fh
	}

	// Set up the metadata storage
	mf, err := os.OpenFile(c.String("output"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
	if err != nil {
		logrus.Fatal(err)
	}
	defer mf.Close()
	mfz := gzip.NewWriter(mf)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

	// we're passing nil here for the file putter, because the file payloads
	// are discarded; the full stream is still copied to stdout below
	its, err := asm.NewInputTarStream(inputStream, metaPacker, nil)
	if err != nil {
		logrus.Fatal(err)
	}
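	// copy the stream to stdout; packing the metadata happens as a side
	// effect of the read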
	i, err := io.Copy(os.Stdout, its)
	if err != nil {
		logrus.Fatal(err)
	}
	logrus.Infof("created %s from %s (read %d bytes)", c.String("output"), c.Args()[0], i)
}
Example #6
File: graph.go  Project: ranid/docker
func (graph *Graph) disassembleAndApplyTarLayer(img *image.Image, layerData io.Reader, root string) (err error) {
	var ar io.Reader

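	// when tar-split is disabled, hand the layer data to the driver as-is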
	if graph.tarSplitDisabled {
		ar = layerData
	} else {
		// this is saving the tar-split metadata
		mf, err := os.OpenFile(filepath.Join(root, tarDataFileName), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
		if err != nil {
			return err
		}

		mfz := gzip.NewWriter(mf)
		metaPacker := storage.NewJSONPacker(mfz)
		defer mf.Close()
		defer mfz.Close()

		inflatedLayerData, err := archive.DecompressStream(layerData)
		if err != nil {
			return err
		}

		// we're passing nil here for the file putter, because the ApplyDiff will
		// handle the extraction of the archive
		rdr, err := asm.NewInputTarStream(inflatedLayerData, metaPacker, nil)
		if err != nil {
			return err
		}

		ar = archive.Reader(rdr)
	}

	if img.Size, err = graph.driver.ApplyDiff(img.ID, img.Parent, ar); err != nil {
		return err
	}

	return nil
}
Example #7
func writeTarSplitFile(name string, tarContent []byte) error {
	f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	fz := gzip.NewWriter(f)

	metaPacker := storage.NewJSONPacker(fz)
	defer fz.Close()

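	// nil file putter: the file payloads are discarded, and only the
	// tar-split metadata is kept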
	rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil)
	if err != nil {
		return err
	}

	if _, err := io.Copy(ioutil.Discard, rdr); err != nil {
		return err
	}

	return nil
}
Example #8
func TestTarStream(t *testing.T) {
	var (
		expectedSum        = "1eb237ff69bca6e22789ecb05b45d35ca307adbd"
		expectedSize int64 = 10240
	)

	fh, err := os.Open("./testdata/t.tar.gz")
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	gzRdr, err := gzip.NewReader(fh)
	if err != nil {
		t.Fatal(err)
	}
	defer gzRdr.Close()

	// Set up where we'll store the metadata
	w := bytes.NewBuffer([]byte{})
	sp := storage.NewJSONPacker(w)
	fgp := storage.NewBufferFileGetPutter()

	// wrap the disassembly stream
	tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
	if err != nil {
		t.Fatal(err)
	}

	// get a sum of the stream after it has passed through to ensure it's the same.
	h0 := sha1.New()
	tRdr0 := io.TeeReader(tarStream, h0)

	// read it all to the bit bucket
	i, err := io.Copy(ioutil.Discard, tRdr0)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h0.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of tar: expected %s; got %x", expectedSum, h0.Sum(nil))
	}

	t.Logf("%s", w.String()) // if we fail, then show the packed info

	// If we've made it this far, then we'll turn it around and create a tar
	// stream from the packed metadata and buffered file contents.
	r := bytes.NewBuffer(w.Bytes())
	sup := storage.NewJSONUnpacker(r)
	// and reuse the fgp that we Put the payloads to.

	rc := NewOutputTarStream(fgp, sup)
	h1 := sha1.New()
	i, err = io.Copy(h1, rc)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of output tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h1.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of output tar: expected %s; got %x", expectedSum, h1.Sum(nil))
	}
}
Example #9
func TestTarStream(t *testing.T) {
	testCases := []struct {
		path            string
		expectedSHA1Sum string
		expectedSize    int64
	}{
		{"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
		{"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
		{"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
		{"./testdata/iso-8859.tar.gz", "ddafa51cb03c74ec117ab366ee2240d13bba1ec3", 10240},
	}

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Set up where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		tRdr0 := io.TeeReader(tarStream, h0)

		// read it all to the bit bucket
		i, err := io.Copy(ioutil.Discard, tRdr0)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}
Example #10
func (ls *layerStore) migrateLayer(tx MetadataTransaction, tarDataFile string, layer *roLayer) error {
	var ar io.Reader
	var tdf *os.File
	var err error
	if tarDataFile != "" {
		tdf, err = os.Open(tarDataFile)
		if err != nil {
			if !os.IsNotExist(err) {
				return err
			}
			tdf = nil
		}
		defer tdf.Close()
	}
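	// if existing tar-split data was found, replay it into the new
	// transaction; otherwise regenerate it from the graph driver's diff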
	if tdf != nil {
		tsw, err := tx.TarSplitWriter()
		if err != nil {
			return err
		}

		defer tsw.Close()

		uncompressed, err := gzip.NewReader(tdf)
		if err != nil {
			return err
		}
		defer uncompressed.Close()

		tr := io.TeeReader(uncompressed, tsw)
		trc := ioutils.NewReadCloserWrapper(tr, uncompressed.Close)

		ar, err = ls.assembleTar(layer.cacheID, trc, &layer.size)
		if err != nil {
			return err
		}

	} else {
		var graphParent string
		if layer.parent != nil {
			graphParent = layer.parent.cacheID
		}
		archiver, err := ls.driver.Diff(layer.cacheID, graphParent)
		if err != nil {
			return err
		}
		defer archiver.Close()

		tsw, err := tx.TarSplitWriter()
		if err != nil {
			return err
		}
		metaPacker := storage.NewJSONPacker(tsw)
		packerCounter := &packSizeCounter{metaPacker, &layer.size}
		defer tsw.Close()

		ar, err = asm.NewInputTarStream(archiver, packerCounter, nil)
		if err != nil {
			return err
		}
	}

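	// compute the layer DiffID over the assembled tar stream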
	digester := digest.Canonical.New()
	_, err = io.Copy(digester.Hash(), ar)
	if err != nil {
		return err
	}

	layer.diffID = DiffID(digester.Digest())

	return nil
}
Example #11
func main() {
	flag.Parse()

	for _, arg := range flag.Args() {
		fh, err := os.Open(arg)
		if err != nil {
			log.Fatal(err)
		}
		defer fh.Close()
		fi, err := fh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("inspecting %q (size %dk)\n", fh.Name(), fi.Size()/1024)

		packFh, err := ioutil.TempFile("", "packed.")
		if err != nil {
			log.Fatal(err)
		}
		defer packFh.Close()
		if *flCleanup {
			defer os.Remove(packFh.Name())
		}

		sp := storage.NewJSONPacker(packFh)
		fp := storage.NewDiscardFilePutter()
		dissam, err := asm.NewInputTarStream(fh, sp, fp)
		if err != nil {
			log.Fatal(err)
		}

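		// walk the disassembled stream with a tar reader so every entry is
		// read (and therefore packed), counting files along the way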
		var num int
		tr := tar.NewReader(dissam)
		for {
			_, err = tr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				log.Fatal(err)
			}
			num++
			if _, err := io.Copy(ioutil.Discard, tr); err != nil {
				log.Fatal(err)
			}
		}
		fmt.Printf(" -- number of files: %d\n", num)

		if err := packFh.Sync(); err != nil {
			log.Fatal(err)
		}

		fi, err = packFh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf(" -- size of metadata uncompressed: %dk\n", fi.Size()/1024)

		gzPackFh, err := ioutil.TempFile("", "packed.gz.")
		if err != nil {
			log.Fatal(err)
		}
		defer gzPackFh.Close()
		if *flCleanup {
			defer os.Remove(gzPackFh.Name())
		}

		gzWrtr := gzip.NewWriter(gzPackFh)

		if _, err := packFh.Seek(0, 0); err != nil {
			log.Fatal(err)
		}

		if _, err := io.Copy(gzWrtr, packFh); err != nil {
			log.Fatal(err)
		}
		gzWrtr.Close()

		if err := gzPackFh.Sync(); err != nil {
			log.Fatal(err)
		}

		fi, err = gzPackFh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf(" -- size of gzip compressed metadata: %dk\n", fi.Size()/1024)
	}
}
Example #12
func CommandChecksize(c *cli.Context) {
	if len(c.Args()) == 0 {
		logrus.Fatalf("please specify tar archives to check ('-' will check stdin)")
	}
	for _, arg := range c.Args() {
		fh, err := os.Open(arg)
		if err != nil {
			log.Fatal(err)
		}
		defer fh.Close()
		fi, err := fh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("inspecting %q (size %dk)\n", fh.Name(), fi.Size()/1024)

		packFh, err := ioutil.TempFile("", "packed.")
		if err != nil {
			log.Fatal(err)
		}
		defer packFh.Close()
		if !c.Bool("work") {
			defer os.Remove(packFh.Name())
		} else {
			fmt.Printf(" -- working file preserved: %s\n", packFh.Name())
		}

		sp := storage.NewJSONPacker(packFh)
		fp := storage.NewDiscardFilePutter()
		dissam, err := asm.NewInputTarStream(fh, sp, fp)
		if err != nil {
			log.Fatal(err)
		}

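		// walk the disassembled stream with a tar reader so every entry is
		// read (and therefore packed), counting files along the way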
		var num int
		tr := tar.NewReader(dissam)
		for {
			_, err = tr.Next()
			if err != nil {
				if err == io.EOF {
					break
				}
				log.Fatal(err)
			}
			num++
			if _, err := io.Copy(ioutil.Discard, tr); err != nil {
				log.Fatal(err)
			}
		}
		fmt.Printf(" -- number of files: %d\n", num)

		if err := packFh.Sync(); err != nil {
			log.Fatal(err)
		}

		fi, err = packFh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf(" -- size of metadata uncompressed: %dk\n", fi.Size()/1024)

		gzPackFh, err := ioutil.TempFile("", "packed.gz.")
		if err != nil {
			log.Fatal(err)
		}
		defer gzPackFh.Close()
		if !c.Bool("work") {
			defer os.Remove(gzPackFh.Name())
		}

		gzWrtr := gzip.NewWriter(gzPackFh)

		if _, err := packFh.Seek(0, 0); err != nil {
			log.Fatal(err)
		}

		if _, err := io.Copy(gzWrtr, packFh); err != nil {
			log.Fatal(err)
		}
		gzWrtr.Close()

		if err := gzPackFh.Sync(); err != nil {
			log.Fatal(err)
		}

		fi, err = gzPackFh.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf(" -- size of gzip compressed metadata: %dk\n", fi.Size()/1024)
	}
}
Example #13
func TestTarStream(t *testing.T) {

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Set up where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		i, err := io.Copy(h0, tarStream)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}