Example #1
func uploadRandomLayer(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest {
	dr, size, err := createRandomData()
	if err != nil {
		t.Fatalf("failed to create random file: %v", err)
	}

	h := sha256.New()
	rd := io.TeeReader(dr, h)
	blobUpload, err := bi.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}
	nn, err := io.Copy(blobUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}
	if nn != size {
		t.Fatalf("layer data write incomplete")
	}
	dgst := digest.NewDigest("sha256", h)
	_, err = blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}
	return dgst
}
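The core of this example is the tee-then-commit pattern: the upload stream is teed through a sha256 hasher so the digest can be computed while the bytes are written, and that digest is handed to Commit for verification. A minimal sketch of the pattern in isolation, assuming the older github.com/docker/distribution/digest import path; verifyingCopy is a hypothetical name, not part of the original code:

import (
	"crypto/sha256"
	"io"

	"github.com/docker/distribution/digest"
)

// verifyingCopy streams src into dst while hashing the bytes as they pass
// through, returning the byte count and the sha256 digest of everything
// written (hypothetical helper).
func verifyingCopy(dst io.Writer, src io.Reader) (int64, digest.Digest, error) {
	h := sha256.New()
	n, err := io.Copy(dst, io.TeeReader(src, h))
	if err != nil {
		return 0, "", err
	}
	return n, digest.NewDigest("sha256", h), nil
}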
Example #2
// pushV2Image pushes the image content to the v2 registry, first buffering the contents to disk
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil))

	image, err := s.graph.Get(img.ID)
	if err != nil {
		return "", err
	}
	arch, err := image.TarLayer()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	tf, err := s.graph.newTempFile()
	if err != nil {
		return "", err
	}
	defer func() {
		tf.Close()
		os.Remove(tf.Name())
	}()

	h := sha256.New()
	size, err := bufferToFile(tf, io.TeeReader(arch, h))
	if err != nil {
		return "", err
	}
	dgst := digest.NewDigest("sha256", h)

	// Send the layer
	log.Debugf("rendered layer for %s of [%d] size", img.ID, size)

	if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(),
		progressreader.New(progressreader.Config{
			In:        tf,
			Out:       out,
			Formatter: sf,
			Size:      int(size),
			NewLines:  false,
			ID:        common.TruncateID(img.ID),
			Action:    "Pushing",
		}), auth); err != nil {
		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
		return "", err
	}
	out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil))
	return dgst.String(), nil
}
Example #3
func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) {
	var (
		h = sha256.New()
		w = gzip.NewWriter(io.MultiWriter(f, h))
	)
	_, err := io.Copy(w, src)
	w.Close()
	if err != nil {
		return 0, "", err
	}
	n, err := f.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, "", err
	}
	if _, err := f.Seek(0, 0); err != nil {
		return 0, "", err
	}
	return n, digest.NewDigest("sha256", h), nil
}
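bufferToFile gzips the source into the file while hashing the compressed bytes, then rewinds the file so the same data can be re-read for the actual push. A hypothetical usage sketch (stageLayer and the temp-file handling are assumptions, not part of the original code):

import (
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/distribution/digest"
)

// stageLayer buffers an arbitrary layer stream into a temporary file via
// bufferToFile and reports the compressed size and digest. The caller owns
// closing and removing the returned file (hypothetical caller).
func stageLayer(layer io.Reader) (*os.File, int64, digest.Digest, error) {
	tf, err := ioutil.TempFile("", "layer-")
	if err != nil {
		return nil, 0, "", err
	}
	size, dgst, err := bufferToFile(tf, layer)
	if err != nil {
		tf.Close()
		os.Remove(tf.Name())
		return nil, 0, "", err
	}
	// bufferToFile leaves tf seeked back to the start, ready to be streamed
	// to the registry.
	return tf, size, dgst, nil
}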
Example #4
// writeTestLayer writes a simple test layer to the provided driver under the
// tarsum dgst, returning the sha256 digest of the content. This is implemented
// piecemeal and should probably be replaced by the uploader when it's ready.
func writeTestLayer(driver storagedriver.StorageDriver, pathMapper *pathMapper, name string, dgst digest.Digest, content io.Reader) (digest.Digest, error) {
	h := sha256.New()
	rd := io.TeeReader(content, h)

	p, err := ioutil.ReadAll(rd)

	if err != nil {
		return "", err
	}

	blobDigestSHA := digest.NewDigest("sha256", h)

	blobPath, err := pathMapper.path(blobDataPathSpec{
		digest: dgst,
	})
	if err != nil {
		return "", err
	}

	ctx := context.Background()
	if err := driver.PutContent(ctx, blobPath, p); err != nil {
		return "", err
	}

	layerLinkPath, err := pathMapper.path(layerLinkPathSpec{
		name:   name,
		digest: dgst,
	})

	if err != nil {
		return "", err
	}

	if err := driver.PutContent(ctx, layerLinkPath, []byte(dgst)); err != nil {
		return "", err
	}

	return blobDigestSHA, nil
}
Example #5
// Push pushes a plugin to a registry.
func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return "", err
	}

	repoInfo, err := rs.ResolveRepository(ref)
	if err != nil {
		return "", err
	}

	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
		return "", err
	}

	endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname())
	if err != nil {
		return "", err
	}

	var confirmedV2 bool
	var repository distribution.Repository
	for _, endpoint := range endpoints {
		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}
		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull")
		if err != nil {
			return "", err
		}
		if !confirmedV2 {
			return "", ErrUnSupportedRegistry
		}
		logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
		// This means that we found an endpoint and we are ready to push.
		break
	}

	// Returns a reference to the repository's blob service.
	blobs := repository.Blobs(context.Background())

	// Descriptor = {mediaType, size, digest}
	var descs []distribution.Descriptor

	for i, f := range []io.ReadCloser{config, layers} {
		bw, err := blobs.Create(context.Background())
		if err != nil {
			logrus.Debugf("Error in blobs.Create: %v", err)
			return "", err
		}
		h := sha256.New()
		r := io.TeeReader(f, h)
		_, err = io.Copy(bw, r)
		if err != nil {
			logrus.Debugf("Error in io.Copy: %v", err)
			return "", err
		}
		f.Close()
		mt := MediaTypeLayer
		if i == 0 {
			mt = MediaTypeConfig
		}
		// Commit completes the write process to the BlobService.
		// The descriptor arg to Commit is called the "provisional" descriptor and
		// is used for validation.
		// The returned descriptor is the one that should be used; it's called the
		// "canonical" descriptor.
		desc, err := bw.Commit(context.Background(), distribution.Descriptor{
			MediaType: mt,
			// XXX: What about the Size?
			Digest: digest.NewDigest("sha256", h),
		})
		if err != nil {
			logrus.Debugf("Error in bw.Commit: %v", err)
			return "", err
		}
		// Set the media type on the canonical descriptor again, just in case.
		// Don't touch the digest or the size here.
		desc.MediaType = mt
		logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest)
		descs = append(descs, desc)
	}

	// XXX: schema2.Versioned needs a MediaType as well.
	// "application/vnd.docker.distribution.manifest.v2+json"
	m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]})
	if err != nil {
		logrus.Debugf("error in schema2.FromStruct: %v", err)
		return "", err
	}

	msv, err := repository.Manifests(context.Background())
	if err != nil {
		logrus.Debugf("error in repository.Manifests: %v", err)
		return "", err
	}

	_, pl, err := m.Payload()
	if err != nil {
		logrus.Debugf("error in m.Payload: %v", err)
		return "", err
	}

	logrus.Debugf("Pushed manifest: %s", pl)

	tag := DefaultTag
	if tagged, ok := ref.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}

	return msv.Put(context.Background(), m, distribution.WithTag(tag))
}
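The XXX comment about Size can be addressed by filling the provisional descriptor completely: io.Copy already reports the byte count, so it can be passed to Commit alongside the digest for an extra length check. A sketch of the loop body refactored that way, under the assumption that the era's docker/distribution packages are in scope; uploadBlob is a hypothetical name:

import (
	"crypto/sha256"
	"io"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// uploadBlob pushes one ReadCloser through the blob ingester, committing with
// both the digest and the size filled in on the provisional descriptor
// (hypothetical helper based on the loop body above).
func uploadBlob(ctx context.Context, blobs distribution.BlobIngester, mt string, f io.ReadCloser) (distribution.Descriptor, error) {
	defer f.Close()

	bw, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	h := sha256.New()
	n, err := io.Copy(bw, io.TeeReader(f, h))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: mt,
		Size:      n, // lets the blob service cross-check the length
		Digest:    digest.NewDigest("sha256", h),
	})
}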
Example #6
// TestSimpleLayerUpload covers the layer upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleLayerUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()

	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache())
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	ls := repository.Layers()

	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	layerUpload, err := ls.Upload()

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := layerUpload.Cancel(); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != distribution.ErrLayerUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unkown: %v", err)
	}

	// Restart!
	layerUpload, err = ls.Upload()
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(layerUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	offset, err := layerUpload.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatalf("unexpected error seeking layer upload: %v", err)
	}

	if offset != nn {
		t.Fatalf("layerUpload not updated with correct offset: %v != %v", offset, nn)
	}
	layerUpload.Close()

	// Do a resume, for good fun
	layerUpload, err = ls.Resume(layerUpload.UUID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	layer, err := layerUpload.Finish(dgst)

	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := ls.Resume(layerUpload.UUID()); err != distribution.ErrLayerUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	exists, err := ls.Exists(layer.Digest())
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if !exists {
		t.Fatalf("layer should now exist")
	}

	h.Reset()
	nn, err = io.Copy(h, layer)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}
}
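seekerSize is used throughout these tests but not shown here. A plausible minimal implementation, assuming the argument is an io.ReadSeeker whose current offset should be preserved:

import (
	"io"
	"os"
)

// seekerSize returns the total length of a seekable stream without disturbing
// its current offset (sketch of the helper the tests rely on).
func seekerSize(rs io.ReadSeeker) (int64, error) {
	current, err := rs.Seek(0, os.SEEK_CUR)
	if err != nil {
		return 0, err
	}
	end, err := rs.Seek(0, os.SEEK_END)
	if err != nil {
		return 0, err
	}
	if _, err := rs.Seek(current, os.SEEK_SET); err != nil {
		return 0, err
	}
	return end, nil
}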
Example #7
// TestSimpleLayerRead just creates a simple layer file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleLayerRead(t *testing.T) {
	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, cache.NewInMemoryLayerInfoCache())
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	ls := repository.Layers()

	randomLayerReader, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	// Test for existence.
	exists, err := ls.Exists(dgst)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v", err)
	}

	if exists {
		t.Fatalf("layer should not exist")
	}

	// Try to get the layer and make sure we get a not found error
	layer, err := ls.Fetch(dgst)
	if err == nil {
		t.Fatalf("error expected fetching unknown layer")
	}

	switch err.(type) {
	case distribution.ErrUnknownLayer:
		err = nil
	default:
		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
	}

	randomLayerDigest, err := writeTestLayer(driver, defaultPathMapper, imageName, dgst, randomLayerReader)
	if err != nil {
		t.Fatalf("unexpected error writing test layer: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	layer, err = ls.Fetch(dgst)
	if err != nil {
		t.Fatal(err)
	}
	defer layer.Close()

	// Now check the sha digest and ensure it's the same
	h := sha256.New()
	nn, err := io.Copy(h, layer)
	if err != nil && err != io.EOF {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in layer: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != randomLayerDigest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, randomLayerDigest)
	}

	// Now seek back the layer, read the whole thing and check against randomLayerData
	offset, err := layer.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking layer: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(layer)
	if err != nil {
		t.Fatalf("error reading all of layer: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("layer data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}
Example #8
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
	randomDataReader, dgst, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
	if err != nil {
		t.Fatalf("error creating registry: %v", err)
	}
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	blobUpload, err := bs.Create(ctx)

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := blobUpload.Cancel(ctx); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	blobUpload, err = bs.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(blobUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	offset, err := blobUpload.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatalf("unexpected error seeking layer upload: %v", err)
	}

	if offset != nn {
		t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
	}
	blobUpload.Close()

	// Do a resume, for good fun
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	statDesc, err := bs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
	}

	if statDesc != desc {
		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
	}

	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error opening blob for read: %v", err)
	}
	defer rc.Close()

	h.Reset()
	nn, err = io.Copy(h, rc)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}

	// Delete a blob
	err = bs.Delete(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("Unexpected error deleting blob")
	}

	d, err := bs.Stat(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
	}

	_, err = bs.Open(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected success opening deleted blob for read")
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type getting deleted manifest: %#v", err)
	}

	// Re-upload the blob
	randomBlob, err := ioutil.ReadAll(randomDataReader)
	if err != nil {
		t.Fatalf("Error reading all of blob %s", err.Error())
	}
	expectedDigest := digest.FromBytes(randomBlob)
	simpleUpload(t, bs, randomBlob, expectedDigest)

	d, err = bs.Stat(ctx, expectedDigest)
	if err != nil {
		t.Errorf("unexpected error stat-ing blob")
	}
	if d.Digest != expectedDigest {
		t.Errorf("Mismatching digest with restored blob")
	}

	_, err = bs.Open(ctx, expectedDigest)
	if err != nil {
		t.Errorf("Unexpected error opening blob")
	}

	// Reuse state to test delete with a delete-disabled registry
	registry, err = NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableRedirect)
	if err != nil {
		t.Fatalf("error creating registry: %v", err)
	}
	repository, err = registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs = repository.Blobs(ctx)
	err = bs.Delete(ctx, desc.Digest)
	if err == nil {
		t.Errorf("Unexpected success deleting while disabled")
	}
}
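simpleUpload is referenced above but not defined in this snippet. A sketch of what such a helper might look like, assuming it uploads an in-memory blob and verifies the committed digest against the expected one:

import (
	"bytes"
	"io"
	"testing"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// simpleUpload pushes a small in-memory blob and fails the test if the write
// is incomplete or the committed digest does not match expectedDigest
// (sketch of the helper referenced above).
func simpleUpload(t *testing.T, bs distribution.BlobIngester, blob []byte, expectedDigest digest.Digest) {
	ctx := context.Background()

	wr, err := bs.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting upload: %v", err)
	}

	nn, err := io.Copy(wr, bytes.NewReader(blob))
	if err != nil {
		t.Fatalf("error copying into blob writer: %v", err)
	}
	if nn != int64(len(blob)) {
		t.Fatalf("incomplete write: %d != %d", nn, len(blob))
	}

	desc, err := wr.Commit(ctx, distribution.Descriptor{Digest: expectedDigest})
	if err != nil {
		t.Fatalf("unexpected error committing blob: %v", err)
	}
	if desc.Digest != expectedDigest {
		t.Fatalf("unexpected digest after commit: %v != %v", desc.Digest, expectedDigest)
	}
}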
Example #9
// TestSimpleBlobRead just creates a simple blob file and ensures that basic
// open, read, seek, read works. More specific edge cases should be covered in
// other tests.
func TestSimpleBlobRead(t *testing.T) {
	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
	if err != nil {
		t.Fatalf("error creating registry: %v", err)
	}
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	randomLayerReader, dgst, err := testutil.CreateRandomTarFile() // TODO(stevvooe): Consider using just a random string.
	if err != nil {
		t.Fatalf("error creating random data: %v", err)
	}

	// Test for existence.
	desc, err := bs.Stat(ctx, dgst)
	if err != distribution.ErrBlobUnknown {
		t.Fatalf("expected not found error when testing for existence: %v", err)
	}

	rc, err := bs.Open(ctx, dgst)
	if err != distribution.ErrBlobUnknown {
		t.Fatalf("expected not found error when opening non-existent blob: %v", err)
	}

	randomLayerSize, err := seekerSize(randomLayerReader)
	if err != nil {
		t.Fatalf("error getting seeker size for random layer: %v", err)
	}

	descBefore := distribution.Descriptor{Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize}
	t.Logf("desc: %v", descBefore)

	desc, err = addBlob(ctx, bs, descBefore, randomLayerReader)
	if err != nil {
		t.Fatalf("error adding blob to blobservice: %v", err)
	}

	if desc.Size != randomLayerSize {
		t.Fatalf("committed blob has incorrect length: %v != %v", desc.Size, randomLayerSize)
	}

	rc, err = bs.Open(ctx, desc.Digest) // note that we are opening with original digest.
	if err != nil {
		t.Fatalf("error opening blob with %v: %v", dgst, err)
	}
	defer rc.Close()

	// Now check the sha digest and ensure it's the same
	h := sha256.New()
	nn, err := io.Copy(h, rc)
	if err != nil {
		t.Fatalf("unexpected error copying to hash: %v", err)
	}

	if nn != randomLayerSize {
		t.Fatalf("stored incorrect number of bytes in blob: %d != %d", nn, randomLayerSize)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	if sha256Digest != desc.Digest {
		t.Fatalf("fetched digest does not match: %q != %q", sha256Digest, desc.Digest)
	}

	// Now seek back the blob, read the whole thing and check against randomLayerData
	offset, err := rc.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error seeking blob: %v", err)
	}

	if offset != 0 {
		t.Fatalf("seek failed: expected 0 offset, got %d", offset)
	}

	p, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Fatalf("error reading all of blob: %v", err)
	}

	if len(p) != int(randomLayerSize) {
		t.Fatalf("blob data read has different length: %v != %v", len(p), randomLayerSize)
	}

	// Reset the randomLayerReader and read back the buffer
	_, err = randomLayerReader.Seek(0, os.SEEK_SET)
	if err != nil {
		t.Fatalf("error resetting layer reader: %v", err)
	}

	randomLayerData, err := ioutil.ReadAll(randomLayerReader)
	if err != nil {
		t.Fatalf("random layer read failed: %v", err)
	}

	if !bytes.Equal(p, randomLayerData) {
		t.Fatalf("layer data not equal")
	}
}
Example #10
// TestBlobMount covers the blob mount process, exercising common
// error paths that might be seen during a mount.
func TestBlobMount(t *testing.T) {
	randomDataReader, dgst, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	ctx := context.Background()
	imageName, _ := reference.ParseNamed("foo/bar")
	sourceImageName, _ := reference.ParseNamed("foo/source")
	driver := inmemory.New()
	registry, err := NewRegistry(ctx, driver, BlobDescriptorCacheProvider(memory.NewInMemoryBlobDescriptorCacheProvider()), EnableDelete, EnableRedirect)
	if err != nil {
		t.Fatalf("error creating registry: %v", err)
	}

	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	sourceRepository, err := registry.Repository(ctx, sourceImageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}

	sbs := sourceRepository.Blobs(ctx)

	blobUpload, err := sbs.Create(ctx)

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(blobUpload, randomDataReader)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// Test for existence.
	statDesc, err := sbs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v, %#v", err, sbs)
	}

	if statDesc != desc {
		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
	}

	bs := repository.Blobs(ctx)
	// Test destination for existence.
	statDesc, err = bs.Stat(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected non-error stating unmounted blob: %v", desc)
	}

	canonicalRef, err := reference.WithDigest(sourceRepository.Name(), desc.Digest)
	if err != nil {
		t.Fatal(err)
	}

	bw, err := bs.Create(ctx, WithMountFrom(canonicalRef))
	if bw != nil {
		t.Fatal("unexpected blobwriter returned from Create call, should mount instead")
	}

	ebm, ok := err.(distribution.ErrBlobMounted)
	if !ok {
		t.Fatalf("unexpected error mounting layer: %v", err)
	}

	if ebm.Descriptor != desc {
		t.Fatalf("descriptors not equal: %v != %v", ebm.Descriptor, desc)
	}

	// Test for existence.
	statDesc, err = bs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
	}

	if statDesc != desc {
		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
	}

	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error opening blob for read: %v", err)
	}
	defer rc.Close()

	h := sha256.New()
	nn, err = io.Copy(h, rc)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != dgst {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), dgst)
	}

	// Delete the blob from the source repo
	err = sbs.Delete(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("Unexpected error deleting blob")
	}

	d, err := bs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error stating blob deleted from source repository: %v", err)
	}

	d, err = sbs.Stat(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
	}

	// Delete the blob from the dest repo
	err = bs.Delete(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("Unexpected error deleting blob")
	}

	d, err = bs.Stat(ctx, desc.Digest)
	if err == nil {
		t.Fatalf("unexpected non-error stating deleted blob: %v", d)
	}

	switch err {
	case distribution.ErrBlobUnknown:
		break
	default:
		t.Errorf("Unexpected error type stat-ing deleted manifest: %#v", err)
	}
}
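Create with WithMountFrom either hands back a BlobWriter (the mount was declined, so a normal upload is needed) or fails with distribution.ErrBlobMounted carrying the canonical descriptor of the mounted blob. A sketch of the fallback pattern a pusher might build on this, assuming WithMountFrom is in scope as it is in the test above; mountOrCreate is a hypothetical name:

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
)

// mountOrCreate attempts a cross-repository mount of the blob named by ref
// and falls back to a regular upload when the registry declines
// (hypothetical helper mirroring the flow exercised above).
func mountOrCreate(ctx context.Context, bs distribution.BlobStore, ref reference.Canonical) (distribution.BlobWriter, *distribution.Descriptor, error) {
	bw, err := bs.Create(ctx, WithMountFrom(ref))
	switch err := err.(type) {
	case nil:
		// No mount happened; the caller must write the data through bw.
		return bw, nil, nil
	case distribution.ErrBlobMounted:
		// The blob is already available in this repository.
		return nil, &err.Descriptor, nil
	default:
		return nil, nil, err
	}
}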
Example #11
// TestSimpleBlobUpload covers the blob upload process, exercising common
// error paths that might be seen during an upload.
func TestSimpleBlobUpload(t *testing.T) {
	randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile()

	if err != nil {
		t.Fatalf("error creating random reader: %v", err)
	}

	dgst := digest.Digest(tarSumStr)

	ctx := context.Background()
	imageName := "foo/bar"
	driver := inmemory.New()
	registry := NewRegistryWithDriver(ctx, driver, memory.NewInMemoryBlobDescriptorCacheProvider())
	repository, err := registry.Repository(ctx, imageName)
	if err != nil {
		t.Fatalf("unexpected error getting repo: %v", err)
	}
	bs := repository.Blobs(ctx)

	h := sha256.New()
	rd := io.TeeReader(randomDataReader, h)

	blobUpload, err := bs.Create(ctx)

	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Cancel the upload then restart it
	if err := blobUpload.Cancel(ctx); err != nil {
		t.Fatalf("unexpected error during upload cancellation: %v", err)
	}

	// Do a resume, get unknown upload
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("unexpected error resuming upload, should be unknown: %v", err)
	}

	// Restart!
	blobUpload, err = bs.Create(ctx)
	if err != nil {
		t.Fatalf("unexpected error starting layer upload: %s", err)
	}

	// Get the size of our random tarfile
	randomDataSize, err := seekerSize(randomDataReader)
	if err != nil {
		t.Fatalf("error getting seeker size of random data: %v", err)
	}

	nn, err := io.Copy(blobUpload, rd)
	if err != nil {
		t.Fatalf("unexpected error uploading layer data: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("layer data write incomplete")
	}

	offset, err := blobUpload.Seek(0, os.SEEK_CUR)
	if err != nil {
		t.Fatalf("unexpected error seeking layer upload: %v", err)
	}

	if offset != nn {
		t.Fatalf("blobUpload not updated with correct offset: %v != %v", offset, nn)
	}
	blobUpload.Close()

	// Do a resume, for good fun
	blobUpload, err = bs.Resume(ctx, blobUpload.ID())
	if err != nil {
		t.Fatalf("unexpected error resuming upload: %v", err)
	}

	sha256Digest := digest.NewDigest("sha256", h)
	desc, err := blobUpload.Commit(ctx, distribution.Descriptor{Digest: dgst})
	if err != nil {
		t.Fatalf("unexpected error finishing layer upload: %v", err)
	}

	// After finishing an upload, it should no longer exist.
	if _, err := bs.Resume(ctx, blobUpload.ID()); err != distribution.ErrBlobUploadUnknown {
		t.Fatalf("expected layer upload to be unknown, got %v", err)
	}

	// Test for existence.
	statDesc, err := bs.Stat(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error checking for existence: %v, %#v", err, bs)
	}

	if statDesc != desc {
		t.Fatalf("descriptors not equal: %v != %v", statDesc, desc)
	}

	rc, err := bs.Open(ctx, desc.Digest)
	if err != nil {
		t.Fatalf("unexpected error opening blob for read: %v", err)
	}
	defer rc.Close()

	h.Reset()
	nn, err = io.Copy(h, rc)
	if err != nil {
		t.Fatalf("error reading layer: %v", err)
	}

	if nn != randomDataSize {
		t.Fatalf("incorrect read length")
	}

	if digest.NewDigest("sha256", h) != sha256Digest {
		t.Fatalf("unexpected digest from uploaded layer: %q != %q", digest.NewDigest("sha256", h), sha256Digest)
	}
}