Example No. 1
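// TestBufferedFileWriter ensures the buffered file writer does not advance
// its offset until enough data has been written to flush the internal buffer.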
func TestBufferedFileWriter(t *testing.T) {
	ctx := context.Background()
	writer, err := newFileWriter(ctx, inmemory.New(), "/random")

	if err != nil {
		t.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
	}

	// write one byte and ensure the offset hasn't been incremented.
	// offset will only get incremented when the buffer gets flushed
	short := []byte{byte(1)}

	writer.Write(short)

	if writer.offset > 0 {
		t.Fatalf("WriteStream called prematurely")
	}

	// write enough data to cause the buffer to flush and confirm
	// the offset has been incremented
	long := make([]byte, fileWriterBufferSize)
	_, err = rand.Read(long)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}
	writer.Write(long)
	writer.Close()
	if writer.offset != (fileWriterBufferSize + 1) {
		t.Fatalf("WriteStream not called when buffer capacity reached")
	}
}
Example No. 2
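// TestListener wraps a repository with Listen, drives it through
// checkExerciseRepository and verifies that the listener recorded the
// expected operation counts.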
func TestListener(t *testing.T) {
	ctx := context.Background()
	registry := storage.NewRegistryWithDriver(ctx, inmemory.New(), memory.NewInMemoryBlobDescriptorCacheProvider(), true)
	tl := &testListener{
		ops: make(map[string]int),
	}

	repository, err := registry.Repository(ctx, "foo/bar")
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	repository = Listen(repository, tl)

	// Now take the registry through a number of operations
	checkExerciseRepository(t, repository)

	expectedOps := map[string]int{
		"manifest:push": 1,
		"manifest:pull": 2,
		// "manifest:delete": 0, // deletes not supported for now
		"layer:push": 2,
		"layer:pull": 2,
		// "layer:delete":    0, // deletes not supported for now
	}

	if !reflect.DeepEqual(tl.ops, expectedOps) {
		t.Fatalf("counts do not match:\n%v\n !=\n%v", tl.ops, expectedOps)
	}
}
Example No. 3
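// testUploadFS seeds an in-memory driver with numUploads in-progress uploads
// for repoName, each using startedAt as its start time (addUploads is assumed
// to create the upload directory layout consumed by the purge tests).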
func testUploadFS(t *testing.T, numUploads int, repoName string, startedAt time.Time) (driver.StorageDriver, context.Context) {
	d := inmemory.New()
	ctx := context.Background()
	for i := 0; i < numUploads; i++ {
		addUploads(ctx, t, d, uuid.Generate().String(), repoName, startedAt)
	}
	return d, ctx
}
Example No. 4
// TestLayerUploadZeroLength uploads a zero-length blob and verifies that it
// can be committed against the well-known empty tar digest.
func TestLayerUploadZeroLength(t *testing.T) {
	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true)
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	simpleUpload(t, bs, []byte{}, digest.DigestSha256EmptyTar)
}
Example No. 5
// TestFileReaderNonExistentFile ensures the reader behaves as expected with a
// missing or zero-length remote file. While the file may not exist, the
// reader should not error out on creation and should return 0-bytes from the
// read method, with an io.EOF error.
func TestFileReaderNonExistentFile(t *testing.T) {
	driver := inmemory.New()
	fr, err := newFileReader(context.Background(), driver, "/doesnotexist", 10)
	if err != nil {
		t.Fatalf("unexpected error initializing reader: %v", err)
	}

	var buf [1024]byte

	n, err := fr.Read(buf[:])
	if n != 0 {
		t.Fatalf("non-zero byte read reported: %d != 0", n)
	}

	if err != io.EOF {
		t.Fatalf("read on missing file should return io.EOF, got %v", err)
	}
}
Example No. 6
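// newManifestStoreTestEnv constructs a registry backed by an in-memory driver
// and returns the context, driver, registry, repository, name and tag needed
// by the manifest store tests.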
func newManifestStoreTestEnv(t *testing.T, name, tag string) *manifestStoreTestEnv {
	ctx := context.Background()
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true)
	repo, err := registry.Repository(ctx, name)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}

	return &manifestStoreTestEnv{
		ctx:        ctx,
		driver:     driver,
		registry:   registry,
		repository: repo,
		name:       name,
		tag:        tag,
	}
}
Example No. 7
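// testFS seeds an in-memory driver with two empty files under /a/b/c and
// returns the driver, the expected path-to-type map, and the context used to
// populate it.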
func testFS(t *testing.T) (driver.StorageDriver, map[string]string, context.Context) {
	d := inmemory.New()
	c := []byte("")
	ctx := context.Background()
	if err := d.PutContent(ctx, "/a/b/c/d", c); err != nil {
		t.Fatalf("Unable to put to inmemory fs")
	}
	if err := d.PutContent(ctx, "/a/b/c/e", c); err != nil {
		t.Fatalf("Unable to put to inmemory fs")
	}

	expected := map[string]string{
		"/a":       "dir",
		"/a/b":     "dir",
		"/a/b/c":   "dir",
		"/a/b/c/d": "file",
		"/a/b/c/e": "file",
	}

	return d, expected, ctx
}
Example No. 8
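// TestSimpleRead writes 1MB of random content to the driver and verifies that
// reading it back through a fileReader reproduces the original digest.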
func TestSimpleRead(t *testing.T) {
	ctx := context.Background()
	content := make([]byte, 1<<20)
	n, err := rand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}

	if n != len(content) {
		t.Fatalf("random read didn't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"

	if err := driver.PutContent(ctx, path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
	if err != nil {
		t.Fatalf("error allocating file reader: %v", err)
	}

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		t.Fatalf("error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify read data")
	}
}
Example No. 9
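// BenchmarkFileWriter measures repeated 1MB writes through a bare fileWriter
// backed by an in-memory driver; per-iteration setup is excluded from the
// timed section.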
func BenchmarkFileWriter(b *testing.B) {
	b.StopTimer() // exclude per-iteration setup from the measured time
	for i := 0; i < b.N; i++ {
		// Start basic fileWriter initialization
		fw := fileWriter{
			driver: inmemory.New(),
			path:   "/random",
		}
		ctx := context.Background()
		if fi, err := fw.driver.Stat(ctx, fw.path); err != nil {
			switch err := err.(type) {
			case storagedriver.PathNotFoundError:
				// ignore, offset is zero
			default:
				b.Fatalf("Failed to initialize fileWriter: %v", err.Error())
			}
		} else {
			if fi.IsDir() {
				b.Fatalf("Cannot write to a directory")
			}

			fw.size = fi.Size()
		}

		randomBytes := make([]byte, 1<<20)
		_, err := rand.Read(randomBytes)
		if err != nil {
			b.Fatalf("unexpected error building random data: %v", err)
		}
		// End basic file writer initialization

		b.StartTimer()
		for j := 0; j < 100; j++ {
			fw.Write(randomBytes)
		}
		b.StopTimer()
	}
}
Example No. 10
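// BenchmarkBufferedFileWriter measures the same 1MB write pattern through the
// buffered writer returned by newFileWriter.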
func BenchmarkBufferedFileWriter(b *testing.B) {
	b.StopTimer() // exclude per-iteration setup from the measured time
	ctx := context.Background()
	for i := 0; i < b.N; i++ {
		bfw, err := newFileWriter(ctx, inmemory.New(), "/random")

		if err != nil {
			b.Fatalf("Failed to initialize bufferedFileWriter: %v", err.Error())
		}

		randomBytes := make([]byte, 1<<20)
		_, err = rand.Read(randomBytes)
		if err != nil {
			b.Fatalf("unexpected error building random data: %v", err)
		}

		b.StartTimer()
		for j := 0; j < 100; j++ {
			bfw.Write(randomBytes)
		}
		b.StopTimer()
	}
}
Example No. 11
// TestSimpleWrite takes the fileWriter through common write operations
// ensuring data integrity.
func TestSimpleWrite(t *testing.T) {
	content := make([]byte, 1<<20)
	n, err := rand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}

	if n != len(content) {
		t.Fatalf("random read did't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"
	ctx := context.Background()

	fw, err := newFileWriter(ctx, driver, path)
	if err != nil {
		t.Fatalf("unexpected error creating fileWriter: %v", err)
	}
	defer fw.Close()

	n, err = fw.Write(content)
	if err != nil {
		t.Fatalf("unexpected error writing content: %v", err)
	}
	fw.Flush()

	if n != len(content) {
		t.Fatalf("unexpected write length: %d != %d", n, len(content))
	}

	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}

	// Check the seek position is equal to the content length
	end, err := fw.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("unexpected error seeking: %v", err)
	}

	if end != int64(len(content)) {
		t.Fatalf("write did not advance offset: %d != %d", end, len(content))
	}

	// Double the content
	doubled := append(content, content...)
	doubledgst, err := digest.FromReader(bytes.NewReader(doubled))
	if err != nil {
		t.Fatalf("unexpected error digesting doubled content: %v", err)
	}

	nn, err := fw.ReadFrom(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error doubling content: %v", err)
	}

	if nn != int64(len(content)) {
		t.Fatalf("writeat was short: %d != %d", n, len(content))
	}

	fr, err = newFileReader(ctx, driver, path, int64(len(doubled)))
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err = digest.NewDigestVerifier(doubledgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}

	// Check that Write updated the offset.
	end, err = fw.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("unexpected error seeking: %v", err)
	}

	if end != int64(len(doubled)) {
		t.Fatalf("write did not advance offset: %d != %d", end, len(doubled))
	}

	// Now, we copy from one path to another, running the data through the
	// fileReader to fileWriter, rather than the driver.Move command to ensure
	// everything is working correctly.
	fr, err = newFileReader(ctx, driver, path, int64(len(doubled)))
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	fw, err = newFileWriter(ctx, driver, "/copied")
	if err != nil {
		t.Fatalf("unexpected error creating fileWriter: %v", err)
	}
	defer fw.Close()

	nn, err = io.Copy(fw, fr)
	if err != nil {
		t.Fatalf("unexpected error copying data: %v", err)
	}

	if nn != int64(len(doubled)) {
		t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled))
	}

	fr, err = newFileReader(ctx, driver, "/copied", int64(len(doubled)))
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err = digest.NewDigestVerifier(doubledgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}
}
Example No. 12
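// TestFileReaderSeek writes a repeated pattern, seeks to random pattern-aligned
// offsets and verifies each read, then checks that seeks to the start, end,
// before the start and past the end behave as expected.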
func TestFileReaderSeek(t *testing.T) {
	driver := inmemory.New()
	pattern := "01234567890ab" // prime length block
	repetitions := 1024
	path := "/patterned"
	content := bytes.Repeat([]byte(pattern), repetitions)
	ctx := context.Background()

	if err := driver.PutContent(ctx, path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(ctx, driver, path, int64(len(content)))

	if err != nil {
		t.Fatalf("unexpected error creating file reader: %v", err)
	}

	// Seek all over the place, in blocks of pattern size and make sure we get
	// the right data.
	for _, repetition := range mrand.Perm(repetitions - 1) {
		targetOffset := int64(len(pattern) * repetition)
		// Seek to a multiple of pattern size and read pattern size bytes
		offset, err := fr.Seek(targetOffset, os.SEEK_SET)
		if err != nil {
			t.Fatalf("unexpected error seeking: %v", err)
		}

		if offset != targetOffset {
			t.Fatalf("did not seek to correct offset: %d != %d", offset, targetOffset)
		}

		p := make([]byte, len(pattern))

		n, err := fr.Read(p)
		if err != nil {
			t.Fatalf("error reading pattern: %v", err)
		}

		if n != len(pattern) {
			t.Fatalf("incorrect read length: %d != %d", n, len(pattern))
		}

		if string(p) != pattern {
			t.Fatalf("incorrect read content: %q != %q", p, pattern)
		}

		// Check offset
		current, err := fr.Seek(0, os.SEEK_CUR)
		if err != nil {
			t.Fatalf("error checking current offset: %v", err)
		}

		if current != targetOffset+int64(len(pattern)) {
			t.Fatalf("unexpected offset after read: %v", err)
		}
	}

	start, err := fr.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking to start: %v", err)
	}

	if start != 0 {
		t.Fatalf("expected to seek to start: %v != 0", start)
	}

	end, err := fr.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("error checking current offset: %v", err)
	}

	if end != int64(len(content)) {
		t.Fatalf("expected to seek to end: %v != %v", end, len(content))
	}

	// Seek before the start and ensure an error is returned.
	before, err := fr.Seek(-1, os.SEEK_SET)
	if err == nil {
		t.Fatalf("error expected, returned offset=%v", before)
	}

	// Seek past the end; the seek should succeed and a subsequent read should return io.EOF.
	after, err := fr.Seek(1, os.SEEK_END)
	if err != nil {
		t.Fatalf("unexpected error expected, returned offset=%v", after)
	}

	p := make([]byte, 16)
	n, err := fr.Read(p)

	if n != 0 {
		t.Fatalf("bytes reads %d != %d", n, 0)
	}

	if err != io.EOF {
		t.Fatalf("expected io.EOF, got %v", err)
	}
}
Example No. 13
// TestAppDispatcher builds an application with a test dispatcher and ensures
// that requests are properly dispatched and the handlers are constructed.
// This only tests the dispatch mechanism. The underlying dispatchers must be
// tested individually.
func TestAppDispatcher(t *testing.T) {
	driver := inmemory.New()
	ctx := context.Background()
	app := &App{
		Config:   configuration.Configuration{},
		Context:  ctx,
		router:   v2.Router(),
		driver:   driver,
		registry: storage.NewRegistryWithDriver(ctx, driver, memorycache.NewInMemoryBlobDescriptorCacheProvider(), true),
	}
	server := httptest.NewServer(app)
	router := v2.Router()

	serverURL, err := url.Parse(server.URL)
	if err != nil {
		t.Fatalf("error parsing server url: %v", err)
	}

	varCheckingDispatcher := func(expectedVars map[string]string) dispatchFunc {
		return func(ctx *Context, r *http.Request) http.Handler {
			// The repository name in the context should always match the name
			// variable extracted from the route.
			if ctx.Repository.Name() != getName(ctx) {
				t.Fatalf("unexpected name: %q != %q", ctx.Repository.Name(), getName(ctx))
			}

			// Check that we have all that is expected
			for expectedK, expectedV := range expectedVars {
				if ctx.Value(expectedK) != expectedV {
					t.Fatalf("unexpected %s in context vars: %q != %q", expectedK, ctx.Value(expectedK), expectedV)
				}
			}

			// Check that we only have variables that are expected
			for k, v := range ctx.Value("vars").(map[string]string) {
				_, ok := expectedVars[k]

				if !ok { // name is checked on context
					// We have an unexpected key, fail
					t.Fatalf("unexpected key %q in vars with value %q", k, v)
				}
			}

			return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
			})
		}
	}

	// unflatten a list of variables, suitable for gorilla/mux, to a map[string]string
	unflatten := func(vars []string) map[string]string {
		m := make(map[string]string)
		for i := 0; i < len(vars)-1; i = i + 2 {
			m[vars[i]] = vars[i+1]
		}

		return m
	}

	for _, testcase := range []struct {
		endpoint string
		vars     []string
	}{
		{
			endpoint: v2.RouteNameManifest,
			vars: []string{
				"name", "foo/bar",
				"reference", "sometag",
			},
		},
		{
			endpoint: v2.RouteNameTags,
			vars: []string{
				"name", "foo/bar",
			},
		},
		{
			endpoint: v2.RouteNameBlob,
			vars: []string{
				"name", "foo/bar",
				"digest", "tarsum.v1+bogus:abcdef0123456789",
			},
		},
		{
			endpoint: v2.RouteNameBlobUpload,
			vars: []string{
				"name", "foo/bar",
			},
		},
		{
			endpoint: v2.RouteNameBlobUploadChunk,
			vars: []string{
				"name", "foo/bar",
				"uuid", "theuuid",
			},
		},
	} {
		app.register(testcase.endpoint, varCheckingDispatcher(unflatten(testcase.vars)))
		route := router.GetRoute(testcase.endpoint).Host(serverURL.Host)
		u, err := route.URL(testcase.vars...)

		if err != nil {
			t.Fatal(err)
		}

		resp, err := http.Get(u.String())

		if err != nil {
			t.Fatal(err)
		}

		if resp.StatusCode != http.StatusOK {
			t.Fatalf("unexpected status code: %v != %v", resp.StatusCode, http.StatusOK)
		}
	}
}
Example No. 14
// TestSimpleBlobRead just creates a simple blob file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleBlobRead(t *testing.T) {
	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true)
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	desc, err := bs.Stat(ctx, dgst)
	if err != distribution.ErrBlobUnknown {
		t.Fatalf("expected not found error when testing for existence: %v", err)
	}

	rc, err := bs.Open(ctx, dgst)
	if err != distribution.ErrBlobUnknown {
		t.Fatalf("expected not found error when opening non-existent blob: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
	t.Logf("desc: %v", descBefore)

	desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
	if err != nil {
		t.Fatalf("error adding blob to blobservice: %v", err)
	}

	if desc.Size != randomLayerSize {
		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
	}

	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
	if err != nil {
		t.Fatalf("error opening blob with %v: %v", dgst, err)
	}
	defer rc.Close()

	// Now check the sha256 digest and ensure it's the same
	h := sha256.New()
	nn, err := io.Copy(h, rc)
	if err != nil {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != desc.Digest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
	}

	// Now seek back the blob, read the whole thing and check against randomLayerData
	offset, err := rc.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking blob: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Fatalf("error reading all of blob: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}
Example No. 15
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), true)
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	blobUpload, err := bs.Create(ctx)

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := blobUpload.Cancel(ctx); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	blobUpload, err = bs.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(blobUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	offset, err := blobUpload.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatalf("unexpected error seeking layer upload: %v", err)
	}

	if offset != nn {
		t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
	}
	blobUpload.Close()

	// Do a resume, for good fun
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	statDesc, err := bs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
	}

	if statDesc != desc {
		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
	}

	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error opening blob for read: %v", err)
	}
	defer rc.Close()

	h.Reset()
	nn, err = io.Copy(h, rc)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}

	// Delete a blob
	err = bs.Delete(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("Unexpected error deleting blob")
	}

	d, err := bs.Stat(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected non-error stating deleted blob: %s", d)
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
	}

	_, err = bs.Open(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected success opening deleted blob for read")
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type getting deleted manifest: %#v", err)
	}

	// Re-upload the blob. The reader was consumed by the first upload, so
	// rewind it before reading its contents back.
	if _, err := randomDataReader.Seek(0, os.SEEK_SET); err != nil {
		t.Fatalf("error resetting random data reader: %v", err)
	}
	randomBlob, err := ioutil.ReadAll(randomDataReader)
	if err != nil {
		t.Fatalf("Error reading all of blob %s", err.Error())
	}
	expectedDigest, err := digest.FromBytes(randomBlob)
	if err != nil {
		t.Fatalf("Error getting digest from bytes: %s", err)
	}
	simpleUpload(t, bs, randomBlob, expectedDigest)

	d, err = bs.Stat(ctx, expectedDigest)
	if err != nil {
		t.Errorf("unexpected error stat-ing blob")
	}
	if d.Digest != expectedDigest {
		t.Errorf("Mismatching digest with restored blob")
	}

	_, err = bs.Open(ctx, expectedDigest)
	if err != nil {
		t.Errorf("Unexpected error opening blob")
	}

	// Reuse state to test delete with a delete-disabled registry
	registry = NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider(), false)
	repository, err = registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs = repository.Blobs(ctx)
	err = bs.Delete(ctx, desc.Digest)
	if err == nil {
		t.Errorf("Unexpected success deleting while disabled")
	}
}