Example #1
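The entries fixture these two tests iterate over is not shown on this page. A minimal, hypothetical stand-in (the testEntry type name and the sample file are invented here; the real fixture in the package may differ) could look like this, assuming the same imports the tests already use (bytes, hash/crc64, and the storage package):

// Hypothetical fixture; Entry.Payload holds the crc64 (storage.CRCTable)
// checksum of Body, which is what the assertions below expect.
type testEntry struct {
	Entry storage.Entry
	Body  []byte
}

var entries = func() []testEntry {
	body := []byte("hello world\n")
	c := crc64.New(storage.CRCTable)
	c.Write(body)
	return []testEntry{
		{
			Entry: storage.Entry{
				Type:    storage.FileType,
				Name:    "./hello.txt",
				Size:    int64(len(body)),
				Payload: c.Sum(nil), // crc64 of Body
			},
			Body: body,
		},
	}
}()
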
func TestTarStreamOld(t *testing.T) {
	fgp := storage.NewBufferFileGetPutter()

	// first, let's prep the GetPutter with the file contents
	for i := range entries {
		if entries[i].Entry.Type == storage.FileType {
			j, csum, err := fgp.Put(entries[i].Entry.Name, bytes.NewBuffer(entries[i].Body))
			if err != nil {
				t.Error(err)
			}
			if j != entries[i].Entry.Size {
				t.Errorf("size %q: expected %d; got %d",
					entries[i].Entry.Name,
					entries[i].Entry.Size,
					j)
			}
			if !bytes.Equal(csum, entries[i].Entry.Payload) {
				t.Errorf("checksum %q: expected %v; got %v",
					entries[i].Entry.Name,
					entries[i].Entry.Payload,
					csum)
			}
		}
	}

	// next we'll use these to produce a tar stream.
	_ = NewOutputTarStream(fgp, nil)
	// TODO finish this
}
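
The assembly half of this older test is left as a TODO; the complete round trip (disassemble through a JSON packer, then reassemble with NewOutputTarStream and a JSON unpacker) is exercised by the later examples on this page.
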
func TestTarStreamMangledGetterPutter(t *testing.T) {
	fgp := storage.NewBufferFileGetPutter()

	// first, let's prep the GetPutter with the file contents
	for i := range entries {
		if entries[i].Entry.Type == storage.FileType {
			j, csum, err := fgp.Put(entries[i].Entry.Name, bytes.NewBuffer(entries[i].Body))
			if err != nil {
				t.Error(err)
			}
			if j != entries[i].Entry.Size {
				t.Errorf("size %q: expected %d; got %d",
					entries[i].Entry.Name,
					entries[i].Entry.Size,
					j)
			}
			if !bytes.Equal(csum, entries[i].Entry.Payload) {
				t.Errorf("checksum %q: expected %v; got %v",
					entries[i].Entry.Name,
					entries[i].Entry.Payload,
					csum)
			}
		}
	}

	for _, e := range entriesMangled {
		if e.Entry.Type == storage.FileType {
			rdr, err := fgp.Get(e.Entry.Name)
			if err != nil {
				// a missing reader can't be checksummed below, so stop here
				t.Fatal(err)
			}
			c := crc64.New(storage.CRCTable)
			i, err := io.Copy(c, rdr)
			if err != nil {
				t.Fatal(err)
			}
			rdr.Close()

			csum := c.Sum(nil)
			if bytes.Equal(csum, e.Entry.Payload) {
				t.Errorf("wrote %d bytes. checksum for %q should not have matched! %v",
					i,
					e.Entry.Name,
					csum)
			}
		}
	}
}
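
The entriesMangled fixture is not shown either. One hypothetical way to build it from the entries stand-in above (again, the real fixture may differ) is to corrupt each file body and recompute its payload, so the checksum of the originally stored content no longer matches:

var entriesMangled = func() []testEntry {
	mangled := make([]testEntry, len(entries))
	copy(mangled, entries)
	for i := range mangled {
		if mangled[i].Entry.Type != storage.FileType {
			continue
		}
		// clone and corrupt the body, then record the checksum of the
		// corrupted content; the data stored via Put above keeps the
		// original body, so the sums should disagree
		body := append([]byte(nil), mangled[i].Body...)
		if len(body) > 0 {
			body[0] ^= 0xff
		}
		mangled[i].Body = body
		c := crc64.New(storage.CRCTable)
		c.Write(body)
		mangled[i].Entry.Payload = c.Sum(nil)
	}
	return mangled
}()
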
Example #3
func BenchmarkAsm(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for _, tc := range testCases {
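			// each case runs in its own closure so the deferred Close
			// calls fire per archive rather than at benchmark exit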
			func() {
				fh, err := os.Open(tc.path)
				if err != nil {
					b.Fatal(err)
				}
				defer fh.Close()
				gzRdr, err := gzip.NewReader(fh)
				if err != nil {
					b.Fatal(err)
				}
				defer gzRdr.Close()

				// Set up where we'll store the metadata
				w := bytes.NewBuffer([]byte{})
				sp := storage.NewJSONPacker(w)
				fgp := storage.NewBufferFileGetPutter()

				// wrap the disassembly stream
				tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
				if err != nil {
					b.Fatal(err)
				}
				// read it all to the bit bucket
				i1, err := io.Copy(ioutil.Discard, tarStream)
				if err != nil {
					b.Fatal(err)
				}

				r := bytes.NewBuffer(w.Bytes())
				sup := storage.NewJSONUnpacker(r)
				// and reuse the fgp that we Put the payloads to.

				rc := NewOutputTarStream(fgp, sup)

				i2, err := io.Copy(ioutil.Discard, rc)
				if err != nil {
					b.Fatal(err)
				}
				if i1 != i2 {
					b.Errorf("%s: input(%d) and output(%d) byte counts didn't match", tc.path, i1, i2)
				}
			}()
		}
	}
}
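
Because each iteration re-opens and round-trips every archive in testCases, the benchmark is usually run on its own, e.g. with go test -run '^$' -bench Asm, so the regular tests do not skew the timing.
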
func TestTarStream(t *testing.T) {
	var (
		expectedSum        = "1eb237ff69bca6e22789ecb05b45d35ca307adbd"
		expectedSize int64 = 10240
	)

	fh, err := os.Open("./testdata/t.tar.gz")
	if err != nil {
		t.Fatal(err)
	}
	defer fh.Close()
	gzRdr, err := gzip.NewReader(fh)
	if err != nil {
		t.Fatal(err)
	}
	defer gzRdr.Close()

	// Set up where we'll store the metadata
	w := bytes.NewBuffer([]byte{})
	sp := storage.NewJSONPacker(w)
	fgp := storage.NewBufferFileGetPutter()

	// wrap the disassembly stream
	tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
	if err != nil {
		t.Fatal(err)
	}

	// get a sum of the stream after it has passed through to ensure it's the same.
	h0 := sha1.New()
	tRdr0 := io.TeeReader(tarStream, h0)

	// read it all to the bit bucket
	i, err := io.Copy(ioutil.Discard, tRdr0)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h0.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of tar: expected %s; got %x", expectedSum, h0.Sum(nil))
	}

	t.Logf("%s", w.String()) // if we fail, then show the packed info

	// If we've made it this far, then we'll turn it around and create a tar
	// stream from the packed metadata and buffered file contents.
	r := bytes.NewBuffer(w.Bytes())
	sup := storage.NewJSONUnpacker(r)
	// and reuse the fgp that we Put the payloads to.

	rc := NewOutputTarStream(fgp, sup)
	h1 := sha1.New()
	i, err = io.Copy(h1, rc)
	if err != nil {
		t.Fatal(err)
	}

	if i != expectedSize {
		t.Errorf("size of output tar: expected %d; got %d", expectedSize, i)
	}
	if fmt.Sprintf("%x", h1.Sum(nil)) != expectedSum {
		t.Fatalf("checksum of output tar: expected %s; got %x", expectedSum, h1.Sum(nil))
	}
}
func TestTarStream(t *testing.T) {
	testCases := []struct {
		path            string
		expectedSHA1Sum string
		expectedSize    int64
	}{
		{"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
		{"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
		{"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
		{"./testdata/iso-8859.tar.gz", "ddafa51cb03c74ec117ab366ee2240d13bba1ec3", 10240},
	}

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Set up where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		tRdr0 := io.TeeReader(tarStream, h0)

		// read it all to the bit bucket
		i, err := io.Copy(ioutil.Discard, tRdr0)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}
Example #6
func TestTarStream(t *testing.T) {

	for _, tc := range testCases {
		fh, err := os.Open(tc.path)
		if err != nil {
			t.Fatal(err)
		}
		defer fh.Close()
		gzRdr, err := gzip.NewReader(fh)
		if err != nil {
			t.Fatal(err)
		}
		defer gzRdr.Close()

		// Set up where we'll store the metadata
		w := bytes.NewBuffer([]byte{})
		sp := storage.NewJSONPacker(w)
		fgp := storage.NewBufferFileGetPutter()

		// wrap the disassembly stream
		tarStream, err := NewInputTarStream(gzRdr, sp, fgp)
		if err != nil {
			t.Fatal(err)
		}

		// get a sum of the stream after it has passed through to ensure it's the same.
		h0 := sha1.New()
		i, err := io.Copy(h0, tarStream)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h0.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of tar: expected %s; got %x", tc.expectedSHA1Sum, h0.Sum(nil))
		}

		//t.Logf("%s", w.String()) // if we fail, then show the packed info

		// If we've made it this far, then we'll turn it around and create a tar
		// stream from the packed metadata and buffered file contents.
		r := bytes.NewBuffer(w.Bytes())
		sup := storage.NewJSONUnpacker(r)
		// and reuse the fgp that we Put the payloads to.

		rc := NewOutputTarStream(fgp, sup)
		h1 := sha1.New()
		i, err = io.Copy(h1, rc)
		if err != nil {
			t.Fatal(err)
		}

		if i != tc.expectedSize {
			t.Errorf("size of output tar: expected %d; got %d", tc.expectedSize, i)
		}
		if fmt.Sprintf("%x", h1.Sum(nil)) != tc.expectedSHA1Sum {
			t.Fatalf("checksum of output tar: expected %s; got %x", tc.expectedSHA1Sum, h1.Sum(nil))
		}
	}
}
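
Both BenchmarkAsm above and this variant reference a package-level testCases fixture that is not included in their snippets; a declaration mirroring the inline table from the earlier table-driven TestTarStream would look like:

var testCases = []struct {
	path            string
	expectedSHA1Sum string
	expectedSize    int64
}{
	{"./testdata/t.tar.gz", "1eb237ff69bca6e22789ecb05b45d35ca307adbd", 10240},
	{"./testdata/longlink.tar.gz", "d9f6babe107b7247953dff6b5b5ae31a3a880add", 20480},
	{"./testdata/fatlonglink.tar.gz", "8537f03f89aeef537382f8b0bb065d93e03b0be8", 26234880},
	{"./testdata/iso-8859.tar.gz", "ddafa51cb03c74ec117ab366ee2240d13bba1ec3", 10240},
}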