func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) {
	event := b.createEvent(action)
	event.Target.MediaType = manifest.ManifestMediaType
	event.Target.Repository = repo.Name()

	p, err := sm.Payload()
	if err != nil {
		return nil, err
	}

	event.Target.Length = int64(len(p))

	event.Target.Digest, err = digest.FromBytes(p)
	if err != nil {
		return nil, err
	}

	// TODO(stevvooe): Currently, this is the "tag" URL; once the digest URL is
	// implemented, this should be replaced.
	event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag)
	if err != nil {
		return nil, err
	}

	return event, nil
}
// calculateImageSize gets and updates the size of each image layer. If a v2
// manifest is converted to v1, the layer size information is lost, so we have
// to fetch it from the server again.
func (isi *ImageStreamImporter) calculateImageSize(ctx gocontext.Context, repo distribution.Repository, image *api.Image) error {
	bs := repo.Blobs(ctx)

	layerSet := sets.NewString()
	size := int64(0)

	for i := range image.DockerImageLayers {
		layer := &image.DockerImageLayers[i]

		if layerSet.Has(layer.Name) {
			continue
		}
		layerSet.Insert(layer.Name)

		if layerSize, ok := isi.digestToLayerSizeCache[layer.Name]; ok {
			size += layerSize
			continue
		}

		desc, err := bs.Stat(ctx, digest.Digest(layer.Name))
		if err != nil {
			return err
		}

		isi.digestToLayerSizeCache[layer.Name] = desc.Size
		layer.LayerSize = desc.Size
		size += desc.Size
	}

	image.DockerImageMetadata.Size = size
	return nil
}
// blobSumAlreadyExists checks if the registry already knows about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
	layersPushed.Lock()
	for _, dgst := range blobsums {
		if layersPushed.layersPushed[dgst] {
			// it is already known that the push is not needed and
			// therefore doing a stat is unnecessary
			layersPushed.Unlock()
			return dgst, true, nil
		}
	}
	layersPushed.Unlock()

	for _, dgst := range blobsums {
		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			return dgst, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return "", false, err
		}
	}
	return "", false, nil
}
// fetch downloads the blob to a temporary file and renames it to the expected name.
func (r *blobRepo) fetch(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (path string, err error) {
	defer apexctx.GetLogger(ctx).WithField("digest", dgst).Trace("fetch the blob").Stop(&err)

	tempFilePath := filepath.Join(r.SpoolPath, fmt.Sprintf("%s-%d", dgst.String(), rand.Int63()))
	f, err := os.Create(tempFilePath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	defer os.Remove(tempFilePath)

	blob, err := repository.Blobs(ctx).Open(ctx, dgst)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	if _, err = io.Copy(f, blob); err != nil {
		return "", err
	}
	// Close explicitly before the rename; the deferred calls then become no-ops.
	f.Close()
	blob.Close()

	resultFilePath := filepath.Join(r.SpoolPath, dgst.String())
	if err = os.Rename(tempFilePath, resultFilePath); err != nil {
		return "", err
	}
	return resultFilePath, nil
}
// newRepositoryWithClient returns a new repository middleware.
func newRepositoryWithClient(registryClient client.Interface, quotaClient kclient.ResourceQuotasNamespacer, ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
	registryAddr := os.Getenv("DOCKER_REGISTRY_URL")
	if len(registryAddr) == 0 {
		return nil, errors.New("DOCKER_REGISTRY_URL is required")
	}

	pullthrough := false
	if value, ok := options["pullthrough"]; ok {
		if b, ok := value.(bool); ok {
			pullthrough = b
		}
	}

	nameParts := strings.SplitN(repo.Name(), "/", 2)
	if len(nameParts) != 2 {
		return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Name())
	}

	return &repository{
		Repository: repo,

		ctx:            ctx,
		quotaClient:    quotaClient,
		registryClient: registryClient,
		registryAddr:   registryAddr,
		namespace:      nameParts[0],
		name:           nameParts[1],
		pullthrough:    pullthrough,
		cachedLayers:   cachedLayers,
	}, nil
}
// newRepository returns a new repository middleware.
func newRepository(ctx context.Context, repo distribution.Repository, options map[string]interface{}) (distribution.Repository, error) {
	registryAddr := os.Getenv("DOCKER_REGISTRY_URL")
	if len(registryAddr) == 0 {
		return nil, errors.New("DOCKER_REGISTRY_URL is required")
	}

	registryClient, err := NewRegistryOpenShiftClient()
	if err != nil {
		return nil, err
	}

	nameParts := strings.SplitN(repo.Name(), "/", 2)
	if len(nameParts) != 2 {
		return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Name())
	}

	return &repository{
		Repository: repo,

		ctx:            ctx,
		registryClient: registryClient,
		registryAddr:   registryAddr,
		namespace:      nameParts[0],
		name:           nameParts[1],
	}, nil
}
func makeManifestService(t *testing.T, repository distribution.Repository) distribution.ManifestService {
	ctx := context.Background()

	manifestService, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatalf("Failed to construct manifest store: %v", err)
	}
	return manifestService
}
func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) {
	m := schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: name,
		Tag:  tag,
	}

	for i := 0; i < 2; i++ {
		wr, err := repository.Blobs(ctx).Create(ctx)
		if err != nil {
			t.Fatalf("unexpected error creating test upload: %v", err)
		}

		rs, ts, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("unexpected error generating test layer file: %v", err)
		}
		dgst := digest.Digest(ts)
		if _, err := io.Copy(wr, rs); err != nil {
			t.Fatalf("unexpected error copying to upload: %v", err)
		}
		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating private key: %v", err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		t.Fatalf("error signing manifest: %v", err)
	}

	ms, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatalf(err.Error())
	}
	if err := ms.Put(sm); err != nil {
		t.Fatalf("unexpected error putting manifest: %v", err)
	}

	pl, err := sm.Payload()
	if err != nil {
		t.Fatal(err)
	}
	return digest.FromBytes(pl)
}
// newRepositoryWithClient returns a new repository middleware.
func newRepositoryWithClient(
	registryOSClient client.Interface,
	quotaClient kcoreclient.ResourceQuotasGetter,
	limitClient kcoreclient.LimitRangesGetter,
	ctx context.Context,
	repo distribution.Repository,
	options map[string]interface{},
) (distribution.Repository, error) {
	registryAddr := os.Getenv(DockerRegistryURLEnvVar)
	if len(registryAddr) == 0 {
		return nil, fmt.Errorf("%s is required", DockerRegistryURLEnvVar)
	}

	acceptschema2, err := getBoolOption(AcceptSchema2EnvVar, "acceptschema2", false, options)
	if err != nil {
		context.GetLogger(ctx).Error(err)
	}

	blobrepositorycachettl, err := getDurationOption(BlobRepositoryCacheTTLEnvVar, "blobrepositorycachettl", defaultBlobRepositoryCacheTTL, options)
	if err != nil {
		context.GetLogger(ctx).Error(err)
	}

	pullthrough, err := getBoolOption(PullthroughEnvVar, "pullthrough", true, options)
	if err != nil {
		context.GetLogger(ctx).Error(err)
	}

	mirrorPullthrough, err := getBoolOption(MirrorPullthroughEnvVar, "mirrorpullthrough", true, options)
	if err != nil {
		context.GetLogger(ctx).Error(err)
	}

	nameParts := strings.SplitN(repo.Named().Name(), "/", 2)
	if len(nameParts) != 2 {
		return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Named().Name())
	}

	return &repository{
		Repository: repo,

		ctx:                    ctx,
		quotaClient:            quotaClient,
		limitClient:            limitClient,
		registryOSClient:       registryOSClient,
		registryAddr:           registryAddr,
		namespace:              nameParts[0],
		name:                   nameParts[1],
		acceptschema2:          acceptschema2,
		blobrepositorycachettl: blobrepositorycachettl,
		pullthrough:            pullthrough,
		mirrorPullthrough:      mirrorPullthrough,
		cachedLayers:           cachedLayers,
	}, nil
}
// MakeSchema2Manifest constructs a schema 2 manifest from a given list of
// digests and returns the built manifest.
func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) {
	ctx := context.Background()
	blobStore := repository.Blobs(ctx)
	builder := schema2.NewManifestBuilder(blobStore, []byte{})
	for _, digest := range digests {
		builder.AppendReference(distribution.Descriptor{Digest: digest})
	}

	manifest, err := builder.Build(ctx)
	if err != nil {
		return nil, fmt.Errorf("unexpected error generating manifest: %v", err)
	}

	return manifest, nil
}
// blobSumAlreadyExists checks if the registry already knows about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the blob's descriptor and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, dgst := range blobsums {
		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}
// UploadBlobs lets you upload blobs to a repository.
func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error {
	ctx := context.Background()
	for digest, rs := range layers {
		wr, err := repository.Blobs(ctx).Create(ctx)
		if err != nil {
			return fmt.Errorf("unexpected error creating upload: %v", err)
		}

		if _, err := io.Copy(wr, rs); err != nil {
			return fmt.Errorf("unexpected error copying to upload: %v", err)
		}

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: digest}); err != nil {
			return fmt.Errorf("unexpected error committing upload: %v", err)
		}
	}
	return nil
}
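// uploadAndBuildManifest is a hypothetical usage sketch, not part of the
// original helpers: it assumes the UploadBlobs and MakeSchema2Manifest helpers
// above and a layers map whose keys are the digests of the corresponding
// readers. It uploads the layers and then builds a schema 2 manifest that
// references them.
func uploadAndBuildManifest(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) (distribution.Manifest, error) {
	if err := UploadBlobs(repository, layers); err != nil {
		return nil, err
	}
	digests := make([]digest.Digest, 0, len(layers))
	for dgst := range layers {
		digests = append(digests, dgst)
	}
	return MakeSchema2Manifest(repository, digests)
}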
func (b *bridge) createLayerEvent(action string, repo distribution.Repository, layer distribution.Layer) (*Event, error) {
	event := b.createEvent(action)
	event.Target.MediaType = layerMediaType
	event.Target.Repository = repo.Name()

	event.Target.Length = layer.Length()

	dgst := layer.Digest()
	event.Target.Digest = dgst

	var err error
	event.Target.URL, err = b.ub.BuildBlobURL(repo.Name(), dgst)
	if err != nil {
		return nil, err
	}

	return event, nil
}
// layerAlreadyExists checks if the registry already knows about any of the
// metadata passed in the "metadata" slice. If it finds one that the registry
// knows about, it returns the corresponding descriptor and "true".
func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, meta := range metadata {
		// Only check blobsums that are known to this repository or have an unknown source
		if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() {
			continue
		}
		descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}
// newRepositoryWithClient returns a new repository middleware.
func newRepositoryWithClient(
	registryOSClient client.Interface,
	quotaClient kclient.ResourceQuotasNamespacer,
	limitClient kclient.LimitRangesNamespacer,
	ctx context.Context,
	repo distribution.Repository,
	options map[string]interface{},
) (distribution.Repository, error) {
	registryAddr := os.Getenv(DockerRegistryURLEnvVar)
	if len(registryAddr) == 0 {
		return nil, fmt.Errorf("%s is required", DockerRegistryURLEnvVar)
	}

	pullthrough := getBoolOption("pullthrough", false, options)

	acceptschema2 := false
	if os.Getenv(AcceptSchema2EnvVar) != "" {
		acceptschema2 = os.Getenv(AcceptSchema2EnvVar) == "true"
	} else {
		acceptschema2 = getBoolOption("acceptschema2", false, options)
	}

	nameParts := strings.SplitN(repo.Named().Name(), "/", 2)
	if len(nameParts) != 2 {
		return nil, fmt.Errorf("invalid repository name %q: it must be of the format <project>/<name>", repo.Named().Name())
	}

	return &repository{
		Repository: repo,

		ctx:              ctx,
		quotaClient:      quotaClient,
		limitClient:      limitClient,
		registryOSClient: registryOSClient,
		registryAddr:     registryAddr,
		namespace:        nameParts[0],
		name:             nameParts[1],
		pullthrough:      pullthrough,
		acceptschema2:    acceptschema2,
		cachedLayers:     cachedLayers,
	}, nil
}
func pushManifest(ctx context.Context, m *schema1.Manifest, privateKey libtrust.PrivateKey, repository distribution.Repository) (string, error) {
	signed, err := schema1.Sign(m, privateKey)
	if err != nil {
		return "", err
	}

	manifestDigest, err := digestFromManifest(signed)
	if err != nil {
		return "", err
	}

	manifests, err := repository.Manifests(ctx)
	if err != nil {
		return "", err
	}

	log.Printf("manifest: digest: %s", manifestDigest)
	return string(manifestDigest), manifests.Put(signed)
}
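// pushSignedManifest is a hypothetical usage sketch, not part of the original
// source: it assumes a repository obtained elsewhere and a populated
// schema1.Manifest whose referenced layers were already uploaded. It generates
// a throwaway signing key and hands off to pushManifest above.
func pushSignedManifest(ctx context.Context, repo distribution.Repository, m *schema1.Manifest) (string, error) {
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return "", err
	}
	return pushManifest(ctx, m, pk, repo)
}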
// Push pushes a plugin to a registry.
func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		return "", err
	}

	repoInfo, err := rs.ResolveRepository(ref)
	if err != nil {
		return "", err
	}

	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
		return "", err
	}

	endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname())
	if err != nil {
		return "", err
	}

	var confirmedV2 bool
	var repository distribution.Repository
	for _, endpoint := range endpoints {
		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}
		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull")
		if err != nil {
			return "", err
		}
		if !confirmedV2 {
			return "", ErrUnSupportedRegistry
		}
		logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
		// This means that we found an endpoint and we are ready to push.
		break
	}

	// Returns a reference to the repository's blob service.
	blobs := repository.Blobs(context.Background())

	// Descriptor = {mediaType, size, digest}
	var descs []distribution.Descriptor

	for i, f := range []io.ReadCloser{config, layers} {
		bw, err := blobs.Create(context.Background())
		if err != nil {
			logrus.Debugf("Error in blobs.Create: %v", err)
			return "", err
		}
		h := sha256.New()
		r := io.TeeReader(f, h)
		_, err = io.Copy(bw, r)
		if err != nil {
			logrus.Debugf("Error in io.Copy: %v", err)
			return "", err
		}
		f.Close()
		mt := MediaTypeLayer
		if i == 0 {
			mt = MediaTypeConfig
		}
		// Commit completes the write process to the BlobService.
		// The descriptor arg to Commit is called the "provisional" descriptor
		// and is used for validation.
		// The returned descriptor should be the one used. It is called the
		// "canonical" descriptor.
		desc, err := bw.Commit(context.Background(), distribution.Descriptor{
			MediaType: mt,
			// XXX: What about the Size?
			Digest: digest.NewDigest("sha256", h),
		})
		if err != nil {
			logrus.Debugf("Error in bw.Commit: %v", err)
			return "", err
		}
		// Set the media type on the canonical descriptor again, just in case.
		// Don't touch the digest or the size here.
		desc.MediaType = mt
		logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest)
		descs = append(descs, desc)
	}

	// XXX: schema2.Versioned needs a MediaType as well.
	// "application/vnd.docker.distribution.manifest.v2+json"
	m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]})
	if err != nil {
		logrus.Debugf("error in schema2.FromStruct: %v", err)
		return "", err
	}

	msv, err := repository.Manifests(context.Background())
	if err != nil {
		logrus.Debugf("error in repository.Manifests: %v", err)
		return "", err
	}

	_, pl, err := m.Payload()
	if err != nil {
		logrus.Debugf("error in m.Payload: %v", err)
		return "", err
	}
	logrus.Debugf("Pushed manifest: %s", pl)

	tag := DefaultTag
	if tagged, ok := ref.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}

	return msv.Put(context.Background(), m, distribution.WithTag(tag))
}
// checkExerciseRepository takes the registry through all of its operations,
// carrying out generic checks.
func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
	// TODO(stevvooe): This would be a nice testutil function. Basically, it
	// takes the registry through a common set of operations. This could be
	// used to make cross-cutting updates by changing internals that affect
	// update counts. Basically, it would make writing tests a lot easier.
	ctx := context.Background()
	tag := "thetag"
	m := manifest.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: repository.Name(),
		Tag:  tag,
	}

	blobs := repository.Blobs(ctx)
	for i := 0; i < 2; i++ {
		rs, ds, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating test layer: %v", err)
		}
		dgst := digest.Digest(ds)

		wr, err := blobs.Create(ctx)
		if err != nil {
			t.Fatalf("error creating layer upload: %v", err)
		}

		// Use the resumes, as well!
		wr, err = blobs.Resume(ctx, wr.ID())
		if err != nil {
			t.Fatalf("error resuming layer upload: %v", err)
		}

		io.Copy(wr, rs)

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}

		m.FSLayers = append(m.FSLayers, manifest.FSLayer{
			BlobSum: dgst,
		})

		// Then fetch the blobs
		if rc, err := blobs.Open(ctx, dgst); err != nil {
			t.Fatalf("error fetching layer: %v", err)
		} else {
			defer rc.Close()
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating key: %v", err)
	}

	sm, err := manifest.Sign(&m, pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	manifests, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatal(err.Error())
	}

	if err = manifests.Put(sm); err != nil {
		t.Fatalf("unexpected error putting the manifest: %v", err)
	}

	p, err := sm.Payload()
	if err != nil {
		t.Fatalf("unexpected error getting manifest payload: %v", err)
	}

	dgst, err := digest.FromBytes(p)
	if err != nil {
		t.Fatalf("unexpected error digesting manifest payload: %v", err)
	}

	fetchedByManifest, err := manifests.Get(dgst)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	if fetchedByManifest.Tag != sm.Tag {
		t.Fatalf("retrieved unexpected manifest: %v", err)
	}

	fetched, err := manifests.GetByTag(tag)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	if fetched.Tag != fetchedByManifest.Tag {
		t.Fatalf("retrieved unexpected manifest: %v", err)
	}
}
// Pull downloads the plugin from Store
func Pull(name string, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) {
	ref, err := reference.ParseNamed(name)
	if err != nil {
		logrus.Debugf("pull.go: error in ParseNamed: %v", err)
		return nil, err
	}

	repoInfo, err := rs.ResolveRepository(ref)
	if err != nil {
		logrus.Debugf("pull.go: error in ResolveRepository: %v", err)
		return nil, err
	}

	if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil {
		logrus.Debugf("pull.go: error in ValidateRepoName: %v", err)
		return nil, err
	}

	endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname())
	if err != nil {
		logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err)
		return nil, err
	}

	var confirmedV2 bool
	var repository distribution.Repository

	for _, endpoint := range endpoints {
		if confirmedV2 && endpoint.Version == registry.APIVersion1 {
			logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL)
			continue
		}

		// TODO: reuse contexts
		repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull")
		if err != nil {
			logrus.Debugf("pull.go: error in NewV2Repository: %v", err)
			return nil, err
		}
		if !confirmedV2 {
			logrus.Debugf("pull.go: !confirmedV2")
			return nil, ErrUnSupportedRegistry
		}
		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version)
		break
	}

	tag := DefaultTag
	if ref, ok := ref.(reference.NamedTagged); ok {
		tag = ref.Tag()
	}

	// tags := repository.Tags(context.Background())
	// desc, err := tags.Get(context.Background(), tag)
	// if err != nil {
	// 	return nil, err
	// }
	//
	msv, err := repository.Manifests(context.Background())
	if err != nil {
		logrus.Debugf("pull.go: error in repository.Manifests: %v", err)
		return nil, err
	}

	manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag))
	if err != nil {
		// TODO: change 401 to 404
		logrus.Debugf("pull.go: error in msv.Get(): %v", err)
		return nil, err
	}

	_, pl, err := manifest.Payload()
	if err != nil {
		logrus.Debugf("pull.go: error in manifest.Payload(): %v", err)
		return nil, err
	}

	var m schema2.Manifest
	if err := json.Unmarshal(pl, &m); err != nil {
		logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err)
		return nil, err
	}

	pd := &pullData{
		repository: repository,
		manifest:   m,
	}

	logrus.Debugf("manifest: %s", pl)
	return pd, nil
}
// checkExerciseRepository takes the registry through all of its operations,
// carrying out generic checks.
func checkExerciseRepository(t *testing.T, repository distribution.Repository) {
	// TODO(stevvooe): This would be a nice testutil function. Basically, it
	// takes the registry through a common set of operations. This could be
	// used to make cross-cutting updates by changing internals that affect
	// update counts. Basically, it would make writing tests a lot easier.
	ctx := context.Background()
	tag := "thetag"
	// todo: change this to use Builder

	m := schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: repository.Named().Name(),
		Tag:  tag,
	}

	var blobDigests []digest.Digest
	blobs := repository.Blobs(ctx)
	for i := 0; i < 2; i++ {
		rs, ds, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating test layer: %v", err)
		}
		dgst := digest.Digest(ds)
		blobDigests = append(blobDigests, dgst)

		wr, err := blobs.Create(ctx)
		if err != nil {
			t.Fatalf("error creating layer upload: %v", err)
		}

		// Use the resumes, as well!
		wr, err = blobs.Resume(ctx, wr.ID())
		if err != nil {
			t.Fatalf("error resuming layer upload: %v", err)
		}

		io.Copy(wr, rs)

		if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil {
			t.Fatalf("unexpected error finishing upload: %v", err)
		}

		m.FSLayers = append(m.FSLayers, schema1.FSLayer{
			BlobSum: dgst,
		})
		m.History = append(m.History, schema1.History{
			V1Compatibility: "",
		})

		// Then fetch the blobs
		if rc, err := blobs.Open(ctx, dgst); err != nil {
			t.Fatalf("error fetching layer: %v", err)
		} else {
			defer rc.Close()
		}
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		t.Fatalf("unexpected error generating key: %v", err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	manifests, err := repository.Manifests(ctx)
	if err != nil {
		t.Fatal(err.Error())
	}

	var digestPut digest.Digest
	if digestPut, err = manifests.Put(ctx, sm); err != nil {
		t.Fatalf("unexpected error putting the manifest: %v", err)
	}

	dgst := digest.FromBytes(sm.Canonical)
	if dgst != digestPut {
		t.Fatalf("mismatching digest from payload and put")
	}

	_, err = manifests.Get(ctx, dgst)
	if err != nil {
		t.Fatalf("unexpected error fetching manifest: %v", err)
	}

	err = manifests.Delete(ctx, dgst)
	if err != nil {
		t.Fatalf("unexpected error deleting manifest: %v", err)
	}

	for _, d := range blobDigests {
		err = blobs.Delete(ctx, d)
		if err != nil {
			t.Fatalf("unexpected error deleting blob: %v", err)
		}
	}
}