// blobSumAlreadyExists checks if the registry already knows about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
	layersPushed.Lock()
	for _, dgst := range blobsums {
		if layersPushed.layersPushed[dgst] {
			// it is already known that the push is not needed and
			// therefore doing a stat is unnecessary
			layersPushed.Unlock()
			return dgst, true, nil
		}
	}
	layersPushed.Unlock()

	for _, dgst := range blobsums {
		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			return dgst, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return "", false, err
		}
	}
	return "", false, nil
}
// calculateImageSize gets and updates the size of each image layer. If a schema 2
// manifest is converted to schema 1, the layer size information is lost, so we have
// to get it from the server again.
func (isi *ImageStreamImporter) calculateImageSize(ctx gocontext.Context, repo distribution.Repository, image *api.Image) error {
	bs := repo.Blobs(ctx)

	layerSet := sets.NewString()
	size := int64(0)

	for i := range image.DockerImageLayers {
		layer := &image.DockerImageLayers[i]

		if layerSet.Has(layer.Name) {
			continue
		}
		layerSet.Insert(layer.Name)

		if layerSize, ok := isi.digestToLayerSizeCache[layer.Name]; ok {
			size += layerSize
			continue
		}

		desc, err := bs.Stat(ctx, digest.Digest(layer.Name))
		if err != nil {
			return err
		}

		isi.digestToLayerSizeCache[layer.Name] = desc.Size
		layer.LayerSize = desc.Size
		size += desc.Size
	}

	image.DockerImageMetadata.Size = size
	return nil
}
// fetch downloads the blob to a tempfile, then renames it to the expected name.
func (r *blobRepo) fetch(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (path string, err error) {
	defer apexctx.GetLogger(ctx).WithField("digest", dgst).Trace("fetch the blob").Stop(&err)

	tempFilePath := filepath.Join(r.SpoolPath, fmt.Sprintf("%s-%d", dgst.String(), rand.Int63()))
	f, err := os.Create(tempFilePath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	defer os.Remove(tempFilePath)

	blob, err := repository.Blobs(ctx).Open(ctx, dgst)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	if _, err = io.Copy(f, blob); err != nil {
		return "", err
	}
	f.Close()
	blob.Close()

	resultFilePath := filepath.Join(r.SpoolPath, dgst.String())
	if err = os.Rename(tempFilePath, resultFilePath); err != nil {
		return "", err
	}
	return resultFilePath, nil
}
func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
	m := schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: name,
		Tag:  tag,
	}

	for i := 0; i < 2; i++ {
		wr, err := repository.Blobs(ctx).Create(ctx)
		if err != nil {
			t.Fatalf("unexpected error creating test upload: %v", err)
		}

		rs, ts, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("unexpected error generating test layer file")
		}
		dgst := digest.Digest(ts)

		if _, err := io.Copy(wr, rs); err != nil {
			t.Fatalf("unexpected error copying to upload: %v", err)
		}

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating private key: %v", err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		t.Fatalf("error signing manifest: %v", err)
	}

	ms, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatalf(err.Error())
	}

	if err := ms.Put(sm); err != nil {
		t.Fatalf("unexpected error putting manifest: %v", err)
	}

	pl, err := sm.Payload()
	if err != nil {
		t.Fatal(err)
	}
	return digest.FromBytes(pl)
}
// MakeSchema2Manifest constructs a schema 2 manifest from a given list of digests
// and returns the manifest.
func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) {
	ctx := context.Background()
	blobStore := repository.Blobs(ctx)
	builder := schema2.NewManifestBuilder(blobStore, []byte{})
	for _, dgst := range digests {
		builder.AppendReference(distribution.Descriptor{Digest: dgst})
	}

	manifest, err := builder.Build(ctx)
	if err != nil {
		return nil, fmt.Errorf("unexpected error generating manifest: %v", err)
	}

	return manifest, nil
}
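// MakeSchema2Manifest returns the manifest itself, not its digest. The following is a
// minimal sketch (not from the original sources) of how the digest can be derived from
// the returned manifest, assuming the newer distribution API in which Manifest.Payload
// returns (mediaType, payload, error) and digest.FromBytes returns a single digest.
// The manifestDigestExample name is illustrative only.
func manifestDigestExample(m distribution.Manifest) (digest.Digest, error) {
	// Payload returns the canonical, serialized form of the manifest.
	_, payload, err := m.Payload()
	if err != nil {
		return "", err
	}
	// Hashing the canonical payload yields the manifest digest a registry would report.
	return digest.FromBytes(payload), nil
}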
// blobSumAlreadyExists checks if the registry already knows about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns its descriptor and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, dgst := range blobsums {
		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}
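// A hypothetical caller sketch (not from the original sources) showing how a push
// path might use blobSumAlreadyExists to decide whether a layer upload can be
// skipped. The skipUploadIfKnownExample name and its return convention are
// illustrative only.
func skipUploadIfKnownExample(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	desc, exists, err := blobSumAlreadyExists(ctx, blobsums, repo, pushState)
	if err != nil {
		return distribution.Descriptor{}, false, err
	}
	if exists {
		// The registry already has this blob; reuse the returned descriptor
		// instead of uploading the layer again.
		return desc, true, nil
	}
	// Not found: the caller would fall through to the normal upload path
	// (Blobs(ctx).Create, io.Copy, Commit), as in the other snippets here.
	return distribution.Descriptor{}, false, nil
}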
// UploadBlobs lets you upload blobs to a repository
func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error {
	ctx := context.Background()
	for dgst, rs := range layers {
		wr, err := repository.Blobs(ctx).Create(ctx)
		if err != nil {
			return fmt.Errorf("unexpected error creating upload: %v", err)
		}

		if _, err := io.Copy(wr, rs); err != nil {
			return fmt.Errorf("unexpected error copying to upload: %v", err)
		}

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			return fmt.Errorf("unexpected error committing upload: %v", err)
		}
	}
	return nil
}
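// A minimal end-to-end sketch (not from the original sources) combining the helpers
// above: UploadBlobs pushes the layer blobs, MakeSchema2Manifest builds a schema 2
// manifest over their digests, and ManifestService.Put with distribution.WithTag
// publishes it. The pushGeneratedManifestExample name and the tag parameter are
// illustrative only.
func pushGeneratedManifestExample(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker, tag string) (digest.Digest, error) {
	ctx := context.Background()

	// Upload the layer blobs first so the manifest builder can reference them.
	if err := UploadBlobs(repository, layers); err != nil {
		return "", err
	}

	var digests []digest.Digest
	for dgst := range layers {
		digests = append(digests, dgst)
	}

	m, err := MakeSchema2Manifest(repository, digests)
	if err != nil {
		return "", err
	}

	ms, err := repository.Manifests(ctx)
	if err != nil {
		return "", err
	}
	// Put returns the digest of the stored manifest; WithTag also tags it.
	return ms.Put(ctx, m, distribution.WithTag(tag))
}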
// layerAlreadyExists checks if the registry already knows about any of the
// metadata passed in the "metadata" slice. If it finds one that the registry
// knows about, it returns its descriptor and "true".
func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, meta := range metadata {
		// Only check blobsums that are known to this repository or have an unknown source
		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
			continue
		}

		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}
// Push pushes a plugin to a registry.
func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return "", err
	}

	repoInfo, err := rs.ResolveRepository(ref)
	if err != nil {
		return "", err
	}

	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
		return "", err
	}

	endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname())
	if err != nil {
		return "", err
	}

	var confirmedV2 bool
	var repository distribution.Repository
	for _, endpoint := range endpoints {
		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}
		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull")
		if err != nil {
			return "", err
		}
		if !confirmedV2 {
			return "", ErrUnSupportedRegistry
		}
		logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
		// We found a usable endpoint and are ready to push.
		break
	}

	// Blobs returns a reference to the repository's blob service.
	blobs := repository.Blobs(context.Background())

	// A descriptor is {mediaType, size, digest}.
	var descs []distribution.Descriptor

	for i, f := range []io.ReadCloser{config, layers} {
		bw, err := blobs.Create(context.Background())
		if err != nil {
			logrus.Debugf("Error in blobs.Create: %v", err)
			return "", err
		}
		h := sha256.New()
		r := io.TeeReader(f, h)
		_, err = io.Copy(bw, r)
		if err != nil {
			logrus.Debugf("Error in io.Copy: %v", err)
			return "", err
		}
		f.Close()
		mt := MediaTypeLayer
		if i == 0 {
			mt = MediaTypeConfig
		}
		// Commit completes the write process to the BlobService.
		// The descriptor passed to Commit is called the "provisional" descriptor and
		// is used for validation.
		// The returned descriptor is the one that should be used; it is called the
		// "canonical" descriptor.
		desc, err := bw.Commit(context.Background(), distribution.Descriptor{
			MediaType: mt,
			// XXX: What about the Size?
			Digest: digest.NewDigest("sha256", h),
		})
		if err != nil {
			logrus.Debugf("Error in bw.Commit: %v", err)
			return "", err
		}
		// Set the media type on the canonical descriptor again, just in case.
		// Don't touch the digest or the size here.
		desc.MediaType = mt
		logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest)
		descs = append(descs, desc)
	}

	// XXX: schema2.Versioned needs a MediaType as well:
	// "application/vnd.docker.distribution.manifest.v2+json"
	m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]})
	if err != nil {
		logrus.Debugf("error in schema2.FromStruct: %v", err)
		return "", err
	}

	msv, err := repository.Manifests(context.Background())
	if err != nil {
		logrus.Debugf("error in repository.Manifests: %v", err)
		return "", err
	}

	_, pl, err := m.Payload()
	if err != nil {
		logrus.Debugf("error in m.Payload: %v", err)
		return "", err
	}
	logrus.Debugf("Pushed manifest: %s", pl)

	tag := DefaultTag
	if tagged, ok := ref.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}

	return msv.Put(context.Background(), m, distribution.WithTag(tag))
}
// checkExerciseRepository takes the registry through all of its operations,
// carrying out generic checks.
func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
	// TODO(stevvooe): This would be a nice testutil function. Basically, it
	// takes the registry through a common set of operations. This could be
	// used to make cross-cutting updates by changing internals that affect
	// update counts. Basically, it would make writing tests a lot easier.
	ctx := context.Background()
	tag := "thetag"
	m := manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: repository.Name(),
		Tag:  tag,
	}

	blobs := repository.Blobs(ctx)
	for i := 0; i < 2; i++ {
		rs, ds, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating test layer: %v", err)
		}
		dgst := digest.Digest(ds)

		wr, err := blobs.Create(ctx)
		if err != nil {
			t.Fatalf("error creating layer upload: %v", err)
		}

		// Use the resumes, as well!
		wr, err = blobs.Resume(ctx, wr.ID())
		if err != nil {
			t.Fatalf("error resuming layer upload: %v", err)
		}

		io.Copy(wr, rs)

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{
			BlobSum: dgst,
		})

		// Then fetch the blobs
		if rc, err := blobs.Open(ctx, dgst); err != nil {
			t.Fatalf("error fetching layer: %v", err)
		} else {
			defer rc.Close()
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating key: %v", err)
	}

	sm, err := manifest.Sign(&m, pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	manifests, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatal(err.Error())
	}

	if err = manifests.Put(sm); err != nil {
		t.Fatalf("unexpected error putting the manifest: %v", err)
	}

	p, err := sm.Payload()
	if err != nil {
		t.Fatalf("unexpected error getting manifest payload: %v", err)
	}

	dgst, err := digest.FromBytes(p)
	if err != nil {
		t.Fatalf("unexpected error digesting manifest payload: %v", err)
	}

	fetchedByManifest, err := manifests.Get(dgst)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	if fetchedByManifest.Tag != sm.Tag {
		t.Fatalf("retrieved unexpected manifest: %v", err)
	}

	fetched, err := manifests.GetByTag(tag)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	if fetched.Tag != fetchedByManifest.Tag {
		t.Fatalf("retrieved unexpected manifest: %v", err)
	}
}
// checkExerciseRepository takes the registry through all of its operations,
// carrying out generic checks.
func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
	// TODO(stevvooe): This would be a nice testutil function. Basically, it
	// takes the registry through a common set of operations. This could be
	// used to make cross-cutting updates by changing internals that affect
	// update counts. Basically, it would make writing tests a lot easier.
	ctx := context.Background()
	tag := "thetag"

	// todo: change this to use Builder
	m := schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: repository.Named().Name(),
		Tag:  tag,
	}

	var blobDigests []digest.Digest
	blobs := repository.Blobs(ctx)
	for i := 0; i < 2; i++ {
		rs, ds, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating test layer: %v", err)
		}
		dgst := digest.Digest(ds)
		blobDigests = append(blobDigests, dgst)

		wr, err := blobs.Create(ctx)
		if err != nil {
			t.Fatalf("error creating layer upload: %v", err)
		}

		// Use the resumes, as well!
		wr, err = blobs.Resume(ctx, wr.ID())
		if err != nil {
			t.Fatalf("error resuming layer upload: %v", err)
		}

		io.Copy(wr, rs)

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}

		m.FSLayers = append(m.FSLayers, schema1.FSLayer{
			BlobSum: dgst,
		})
		m.History = append(m.History, schema1.History{
			V1Compatibility: "",
		})

		// Then fetch the blobs
		if rc, err := blobs.Open(ctx, dgst); err != nil {
			t.Fatalf("error fetching layer: %v", err)
		} else {
			defer rc.Close()
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating key: %v", err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	manifests, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatal(err.Error())
	}

	var digestPut digest.Digest
	if digestPut, err = manifests.Put(ctx, sm); err != nil {
		t.Fatalf("unexpected error putting the manifest: %v", err)
	}

	dgst := digest.FromBytes(sm.Canonical)
	if dgst != digestPut {
		t.Fatalf("mismatching digest from payload and put")
	}

	_, err = manifests.Get(ctx, dgst)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	err = manifests.Delete(ctx, dgst)
	if err != nil {
		t.Fatalf("unexpected error deleting manifest: %v", err)
	}

	for _, d := range blobDigests {
		err = blobs.Delete(ctx, d)
		if err != nil {
			t.Fatalf("unexpected error deleting blob: %v", err)
		}
	}
}