// SetDigest sets the digest for the image layer to the provided value.
func (graph *Graph) SetDigest(id string, dgst digest.Digest) error {
	root := graph.imageRoot(id)
	if err := ioutil.WriteFile(filepath.Join(root, "checksum"), []byte(dgst.String()), 0600); err != nil {
		return fmt.Errorf("Error storing digest in %s/checksum: %s", root, err)
	}
	return nil
}
// checkBlobParentPath asserts that the directory containing a blob's link or
// data does (not) exist. If repoName is given, the link path in the _layers
// directory of that repository will be checked; otherwise the registry's blob
// store will be checked.
func checkBlobParentPath(t *testing.T, ctx context.Context, driver *inmemory.Driver, repoName string, dgst digest.Digest, expectExistent bool) {
	var (
		blobPath string
		err      error
	)

	if repoName != "" {
		blobPath, err = pathFor(layerLinkPathSpec{name: repoName, digest: dgst})
		if err != nil {
			t.Fatalf("failed to get layer link path for repo=%s, digest=%s: %v", repoName, dgst.String(), err)
		}
		blobPath = path.Dir(blobPath)
	} else {
		blobPath, err = pathFor(blobPathSpec{digest: dgst})
		if err != nil {
			t.Fatalf("failed to get blob path for digest %s: %v", dgst.String(), err)
		}
	}

	parentExists, err := exists(ctx, driver, blobPath)
	if err != nil {
		t.Fatalf("failed to check whether path %s exists: %v", blobPath, err)
	}

	if expectExistent && !parentExists {
		t.Errorf("expected blob path %s to exist", blobPath)
	} else if !expectExistent && parentExists {
		t.Errorf("expected blob path %s not to exist", blobPath)
	}
}
func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error {
	if t.refs == nil {
		t.refs = make(map[string]string)
	}
	t.refs[ref.String()] = id.String()
	return nil
}
func schema2ToImage(manifest *schema2.DeserializedManifest, imageConfig []byte, d digest.Digest) (*api.Image, error) {
	mediatype, payload, err := manifest.Payload()
	if err != nil {
		return nil, err
	}

	dockerImage, err := unmarshalDockerImage(imageConfig)
	if err != nil {
		return nil, err
	}

	if len(d) > 0 {
		dockerImage.ID = d.String()
	} else {
		dockerImage.ID = digest.FromBytes(payload).String()
	}

	image := &api.Image{
		ObjectMeta: kapi.ObjectMeta{
			Name: dockerImage.ID,
		},
		DockerImageMetadata:          *dockerImage,
		DockerImageManifest:          string(payload),
		DockerImageConfig:            string(imageConfig),
		DockerImageManifestMediaType: mediatype,
		DockerImageMetadataVersion:   "1.0",
	}

	return image, nil
}
// Get retrieves the manifest with digest `dgst`.
func (r *repository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	if err := r.checkPendingErrors(ctx); err != nil {
		return nil, err
	}

	if _, err := r.getImageStreamImage(dgst); err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving image %s: %v", dgst.String(), err)
		return nil, err
	}

	ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr}
	if managed := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; managed == "true" {
		// A repository reference without a registry part refers to a repository
		// containing locally managed images. Such an entry is retrieved, checked
		// and set by blobDescriptorService operating only on local blobs.
		ref.Registry = ""
	} else {
		// A repository reference with a registry part points to a remote
		// repository. This is used by the pullthrough middleware.
		ref = ref.DockerClientDefaults().AsRepository()
	}

	manifest, err := r.manifestFromImageWithCachedLayers(image, ref.Exact())

	return manifest, err
}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
//	<algorithm>/<hex digest>
//
// Most importantly, for tarsum, the layout looks like this:
//
//	tarsum/<version>/<digest algorithm>/<full digest>
//
// If multilevel is true, the first two hex characters of the digest will
// separate groups of digest folders. It will be as follows:
//
//	<algorithm>/<first two hex characters of digest>/<full digest>
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
	hex := dgst.Hex()
	prefix := []string{algorithm}

	var suffix []string
	if multilevel {
		suffix = append(suffix, hex[:2])
	}
	suffix = append(suffix, hex)

	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
		// We have a tarsum!
		version := tsi.Version
		if version == "" {
			version = "v0"
		}
		prefix = []string{
			"tarsum",
			version,
			tsi.Algorithm,
		}
	}

	return append(prefix, suffix...), nil
}
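// Illustrative sketch (not part of the registry code): for a plain sha256
// digest the components above join into "<algorithm>/<hex>" or, with
// multilevel enabled, "<algorithm>/<first two hex chars>/<hex>". The helper
// below only mirrors that layout for demonstration, assumes the "path" and
// "strings" packages and a well-formed "<algorithm>:<hex>" input, and skips
// the tarsum special case handled by digestPathComponents itself.
func exampleBlobPath(dgst string, multilevel bool) string {
	parts := strings.SplitN(dgst, ":", 2) // assumes "<algorithm>:<hex>"
	algorithm, hex := parts[0], parts[1]

	components := []string{algorithm}
	if multilevel {
		// group blobs by the first two hex characters of the digest
		components = append(components, hex[:2])
	}
	components = append(components, hex)
	return path.Join(components...)
}

// exampleBlobPath("sha256:5891b5b522...", false) => "sha256/5891b5b522..."
// exampleBlobPath("sha256:5891b5b522...", true)  => "sha256/58/5891b5b522..."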
func schema1ToImage(manifest *schema1.SignedManifest, d digest.Digest) (*api.Image, error) {
	if len(manifest.History) == 0 {
		return nil, fmt.Errorf("image has no v1Compatibility history and cannot be used")
	}

	dockerImage, err := unmarshalDockerImage([]byte(manifest.History[0].V1Compatibility))
	if err != nil {
		return nil, err
	}

	mediatype, payload, err := manifest.Payload()
	if err != nil {
		return nil, err
	}

	if len(d) > 0 {
		dockerImage.ID = d.String()
	} else {
		dockerImage.ID = digest.FromBytes(manifest.Canonical).String()
	}

	image := &api.Image{
		ObjectMeta: kapi.ObjectMeta{
			Name: dockerImage.ID,
		},
		DockerImageMetadata:          *dockerImage,
		DockerImageManifest:          string(payload),
		DockerImageManifestMediaType: mediatype,
		DockerImageMetadataVersion:   "1.0",
	}

	return image, nil
}
// Stat returns a blob descriptor if the given blob is either linked in the
// repository or is referenced in the corresponding image stream. This method
// is invoked from inside of upstream's linkedBlobStore. It expects a proper
// repository object to be set on the given context by upper openshift
// middleware wrappers.
func (bs *blobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	repo, found := RepositoryFrom(ctx)
	if !found || repo == nil {
		err := fmt.Errorf("failed to retrieve repository from context")
		context.GetLogger(ctx).Error(err)
		return distribution.Descriptor{}, err
	}

	// if there is a repo layer link, return its descriptor
	desc, err := bs.BlobDescriptorService.Stat(ctx, dgst)
	if err == nil {
		// and remember the association
		repo.cachedLayers.RememberDigest(dgst, repo.blobrepositorycachettl, imageapi.DockerImageReference{
			Namespace: repo.namespace,
			Name:      repo.name,
		}.Exact())
		return desc, nil
	}

	context.GetLogger(ctx).Debugf("could not stat layer link %q in repository %q: %v", dgst.String(), repo.Named().Name(), err)

	// verify the blob is stored locally
	desc, err = dockerRegistry.BlobStatter().Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	// ensure it's referenced inside of corresponding image stream
	if imageStreamHasBlob(repo, dgst) {
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) {
	*m = append(*m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "GET",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Body:       content,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(content))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})

	*m = append(*m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "HEAD",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(content))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})
}
func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	served, err := pbs.serveLocal(ctx, w, r, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
		return err
	}

	if served {
		return nil
	}

	mu.Lock()
	_, ok := inflight[dgst]
	if ok {
		mu.Unlock()
		_, err := pbs.copyContent(ctx, dgst, w)
		return err
	}
	inflight[dgst] = struct{}{}
	mu.Unlock()

	go func(dgst digest.Digest) {
		if err := pbs.storeLocal(ctx, dgst); err != nil {
			context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
		}
		pbs.scheduler.AddBlob(dgst.String(), repositoryTTL)
	}(dgst)

	_, err = pbs.copyContent(ctx, dgst, w)
	if err != nil {
		return err
	}
	return nil
}
func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error {
	root := graph.imageRoot(id)
	if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil {
		return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err)
	}
	return nil
}
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
// Important! This function is called for both GET and HEAD requests. The Docker client uses a HEAD
// request [1] to check for the existence of a layer. If the layer with the digest is available, this
// function MUST return a success response with no actual body content.
// [1] https://docs.docker.com/registry/spec/api/#existing-layers
func (pbs *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := pbs.digestToStore[dgst.String()]
	if !ok {
		return pbs.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	// store the content locally if requested, but ensure only one instance at a time
	// is storing to avoid excessive local writes
	if pbs.mirror {
		mu.Lock()
		if _, ok = inflight[dgst]; ok {
			mu.Unlock()
			context.GetLogger(ctx).Infof("Serving %q while mirroring in background", dgst)
			_, err := pbs.copyContent(store, ctx, dgst, w, req)
			return err
		}
		inflight[dgst] = struct{}{}
		mu.Unlock()

		go func(dgst digest.Digest) {
			context.GetLogger(ctx).Infof("Start background mirroring of %q", dgst)
			if err := pbs.storeLocal(store, ctx, dgst); err != nil {
				context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
			}
			context.GetLogger(ctx).Infof("Completed mirroring of %q", dgst)
		}(dgst)
	}

	_, err := pbs.copyContent(store, ctx, dgst, w, req)
	return err
}
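// A minimal sketch of the single-flight guard used by the ServeBlob
// implementations above, assuming a package-level mutex and in-flight set.
// The names exampleMu, exampleInflight and mirrorOnce are illustrative only;
// the real stores inline this logic, share mu/inflight across requests and
// log via context.GetLogger. Only the first caller for a given digest starts
// the background mirror; concurrent callers keep serving the blob remotely.
var (
	exampleMu       sync.Mutex
	exampleInflight = make(map[digest.Digest]struct{})
)

func mirrorOnce(dgst digest.Digest, mirror func(digest.Digest) error) {
	exampleMu.Lock()
	if _, busy := exampleInflight[dgst]; busy {
		// someone else is already mirroring this digest
		exampleMu.Unlock()
		return
	}
	exampleInflight[dgst] = struct{}{}
	exampleMu.Unlock()

	go func() {
		defer func() {
			exampleMu.Lock()
			delete(exampleInflight, dgst)
			exampleMu.Unlock()
		}()
		_ = mirror(dgst) // errors would be logged in the real store
	}()
}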
// fetch downloads the blob to a tempfile, renames it to the expected name
func (r *blobRepo) fetch(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (path string, err error) {
	defer apexctx.GetLogger(ctx).WithField("digest", dgst).Trace("fetch the blob").Stop(&err)

	tempFilePath := filepath.Join(r.SpoolPath, fmt.Sprintf("%s-%d", dgst.String(), rand.Int63()))
	f, err := os.Create(tempFilePath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	defer os.Remove(tempFilePath)

	blob, err := repository.Blobs(ctx).Open(ctx, dgst)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	if _, err = io.Copy(f, blob); err != nil {
		return "", err
	}
	f.Close()
	blob.Close()

	resultFilePath := filepath.Join(r.SpoolPath, dgst.String())
	if err = os.Rename(tempFilePath, resultFilePath); err != nil {
		return "", err
	}
	return resultFilePath, nil
}
func (pms proxyManifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	sm, err := pms.localManifests.Get(dgst)
	if err == nil {
		proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
		return sm, err
	}

	sm, err = pms.remoteManifests.Get(dgst)
	if err != nil {
		return nil, err
	}

	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
	err = pms.localManifests.Put(sm)
	if err != nil {
		return nil, err
	}

	// Schedule the repo for removal
	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)
	// Ensure the manifest blob is cleaned up
	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)

	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
	return sm, err
}
// RepositoriesForDigest returns a list of repositories that may contain this digest.
func (c digestToRepositoryCache) RepositoriesForDigest(dgst digest.Digest) []string {
	value, ok := c.Get(dgst.String())
	if !ok {
		return nil
	}
	repos := value.(*repositoryBucket)
	return repos.Copy()
}
func (c digestToRepositoryCache) RepositoryHasBlob(repo string, dgst digest.Digest) bool {
	value, ok := c.Get(dgst.String())
	if !ok {
		return false
	}
	repos := value.(*repositoryBucket)
	return repos.Has(repo)
}
// ForgetDigest removes an association between the given digest and repository from the cache.
func (c digestToRepositoryCache) ForgetDigest(dgst digest.Digest, repo string) {
	key := dgst.String()
	value, ok := c.Peek(key)
	if !ok {
		return
	}
	repos := value.(*repositoryBucket)
	repos.Remove(repo)
}
// imageStreamHasBlob returns true if the given blob digest is referenced in the image stream
// corresponding to the given repository. If not found locally, the image stream's images will be
// iterated and fetched from newest to oldest until found. Each processed image will update the
// local cache of blobs.
func imageStreamHasBlob(r *repository, dgst digest.Digest) bool {
	repoCacheName := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name}.Exact()
	if r.cachedLayers.RepositoryHasBlob(repoCacheName, dgst) {
		context.GetLogger(r.ctx).Debugf("found cached blob %q in repository %s", dgst.String(), r.Named().Name())
		return true
	}

	context.GetLogger(r.ctx).Debugf("verifying presence of blob %q in image stream %s/%s", dgst.String(), r.namespace, r.name)
	started := time.Now()
	logFound := func(found bool) bool {
		elapsed := time.Now().Sub(started)
		if found {
			context.GetLogger(r.ctx).Debugf("verified presence of blob %q in image stream %s/%s after %s", dgst.String(), r.namespace, r.name, elapsed.String())
		} else {
			context.GetLogger(r.ctx).Debugf("detected absence of blob %q in image stream %s/%s after %s", dgst.String(), r.namespace, r.name, elapsed.String())
		}
		return found
	}

	// verify directly with etcd
	is, err := r.getImageStream()
	if err != nil {
		context.GetLogger(r.ctx).Errorf("failed to get image stream: %v", err)
		return logFound(false)
	}

	tagEvents := []*imageapi.TagEvent{}
	event2Name := make(map[*imageapi.TagEvent]string)
	for name, eventList := range is.Status.Tags {
		for i := range eventList.Items {
			event := &eventList.Items[i]
			tagEvents = append(tagEvents, event)
			event2Name[event] = name
		}
	}
	// search from youngest to oldest
	sort.Sort(ByGeneration(tagEvents))

	processedImages := map[string]struct{}{}

	for _, tagEvent := range tagEvents {
		if _, processed := processedImages[tagEvent.Image]; processed {
			continue
		}
		if imageHasBlob(r, repoCacheName, tagEvent.Image, dgst.String(), !r.pullthrough) {
			tagName := event2Name[tagEvent]
			context.GetLogger(r.ctx).Debugf("blob found under istag %s/%s:%s in image %s", r.namespace, r.name, tagName, tagEvent.Image)
			return logFound(true)
		}
		processedImages[tagEvent.Image] = struct{}{}
	}

	context.GetLogger(r.ctx).Warnf("blob %q exists locally but is not referenced in repository %s/%s", dgst.String(), r.namespace, r.name)

	return logFound(false)
}
// AddBlob schedules a blob cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddBlob(dgst digest.Digest, ttl time.Duration) error {
	ttles.Lock()
	defer ttles.Unlock()

	if ttles.stopped {
		return fmt.Errorf("scheduler not started")
	}

	ttles.add(dgst.String(), ttl, entryTypeBlob)
	return nil
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
	route := ub.cloneRoute(RouteNameBlob)

	layerURL, err := route.URL("name", name, "digest", dgst.String())
	if err != nil {
		return "", err
	}

	return layerURL.String(), nil
}
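// For reference, RouteNameBlob follows the v2 registry API layout
// /v2/<name>/blobs/<digest>. The helper below is a hedged, standard-library-only
// sketch of the URL the builder produces; it assumes the "fmt" and "strings"
// packages and a hypothetical registry root such as "https://registry.example.com",
// and is not part of URLBuilder itself.
func exampleBlobURL(root, name string, dgst digest.Digest) string {
	return fmt.Sprintf("%s/v2/%s/blobs/%s", strings.TrimSuffix(root, "/"), name, dgst.String())
}

// exampleBlobURL("https://registry.example.com", "library/ubuntu", dgst)
// => "https://registry.example.com/v2/library/ubuntu/blobs/sha256:..."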
func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	m.repo.requests = append(m.repo.requests, dgst.String())
	if err, exists := m.repo.errors[dgst]; exists {
		return distribution.Descriptor{}, err
	}
	if desc, exists := m.repo.blobs[dgst]; exists {
		return desc, nil
	}
	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// RememberDigest associates a digest with a repository.
func (c digestToRepositoryCache) RememberDigest(dgst digest.Digest, repo string) {
	key := dgst.String()
	value, ok := c.Get(key)
	if !ok {
		value = &repositoryBucket{}
		if ok, _ := c.ContainsOrAdd(key, value); !ok {
			return
		}
	}
	repos := value.(*repositoryBucket)
	repos.Add(repo)
}
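// A hedged usage sketch of the digestToRepositoryCache methods shown above
// (RememberDigest, RepositoryHasBlob, RepositoriesForDigest, ForgetDigest).
// The constructor name newDigestToRepositoryCache and the repository name
// "myproject/app" are assumed here purely for illustration; construction of
// the underlying LRU is not shown in these snippets.
func exampleCacheUsage(dgst digest.Digest) {
	cache := newDigestToRepositoryCache() // hypothetical constructor
	cache.RememberDigest(dgst, "myproject/app")

	_ = cache.RepositoryHasBlob("myproject/app", dgst) // true after RememberDigest
	_ = cache.RepositoriesForDigest(dgst)              // contains "myproject/app"

	cache.ForgetDigest(dgst, "myproject/app") // drops only this association
}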
func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	desc, err := pbs.localStore.Stat(ctx, dgst)
	if err != nil && err != distribution.ErrBlobUnknown {
		return err
	}

	if err == nil {
		proxyMetrics.BlobPush(uint64(desc.Size))
		return pbs.localStore.ServeBlob(ctx, w, r, dgst)
	}

	desc, err = pbs.remoteStore.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
	if err != nil {
		return err
	}

	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
	if err != nil {
		return err
	}
	defer cleanup()

	if isNew {
		go func() {
			err := streamToStorage(ctx, remoteReader, desc, bw)
			if err != nil {
				context.GetLogger(ctx).Error(err)
			}
			proxyMetrics.BlobPull(uint64(desc.Size))
		}()

		err := streamToClient(ctx, w, desc, bw)
		if err != nil {
			return err
		}

		proxyMetrics.BlobPush(uint64(desc.Size))
		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
		return nil
	}

	err = streamToClient(ctx, w, desc, bw)
	if err != nil {
		return err
	}
	proxyMetrics.BlobPush(uint64(desc.Size))
	return nil
}
// Stat makes a local check for the blob, then falls through to the other servers referenced by
// the image stream and looks for those that have the layer.
func (r *pullthroughBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	// check the local store for the blob
	desc, err := r.BlobStore.Stat(ctx, dgst)
	switch {
	case err == distribution.ErrBlobUnknown:
		// continue on to the code below and look up the blob in a remote store since it is not in
		// the local store
	case err != nil:
		context.GetLogger(r.repo.ctx).Errorf("Failed to find blob %q: %#v", dgst.String(), err)
		fallthrough
	default:
		return desc, err
	}

	// look up the potential remote repositories that this blob could be part of (at this time,
	// we don't know which image in the image stream surfaced the content).
	is, err := r.repo.getImageStream()
	if err != nil {
		if errors.IsNotFound(err) || errors.IsForbidden(err) {
			return distribution.Descriptor{}, distribution.ErrBlobUnknown
		}
		context.GetLogger(r.repo.ctx).Errorf("Error retrieving image stream for blob: %s", err)
		return distribution.Descriptor{}, err
	}

	var localRegistry string
	if local, err := imageapi.ParseDockerImageReference(is.Status.DockerImageRepository); err == nil {
		// TODO: normalize further?
		localRegistry = local.Registry
	}

	retriever := r.repo.importContext()
	cached := r.repo.cachedLayers.RepositoriesForDigest(dgst)

	// look at the first level of tagged repositories first
	search := identifyCandidateRepositories(is, localRegistry, true)
	if desc, err := r.findCandidateRepository(ctx, search, cached, dgst, retriever); err == nil {
		return desc, nil
	}

	// look at all other repositories tagged by the server
	secondary := identifyCandidateRepositories(is, localRegistry, false)
	for k := range search {
		delete(secondary, k)
	}
	if desc, err := r.findCandidateRepository(ctx, secondary, cached, dgst, retriever); err == nil {
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (r *blobRepo) Get(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (string, error) {
	apexctx.GetLogger(ctx).WithField("digest", dgst).Info("get a blob from Repository")
	path := filepath.Join(r.BlobRepositoryConfig.SpoolPath, dgst.String())

	_, err := os.Lstat(path)
	if err == nil {
		apexctx.GetLogger(ctx).WithField("digest", dgst).Info("the blob has already been downloaded")
		return path, nil
	}
	if !os.IsNotExist(err) {
		return "", err
	}

	return r.download(ctx, repository, dgst)
}
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
func (r *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := r.digestToStore[dgst.String()]
	if !ok {
		return r.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	desc, err := store.Stat(ctx, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Failed to stat digest %q: %v", dgst.String(), err)
		return err
	}

	remoteReader, err := store.Open(ctx, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Failure to open remote store for digest %q: %v", dgst.String(), err)
		return err
	}
	defer remoteReader.Close()

	setResponseHeaders(w, desc.Size, desc.MediaType, dgst)

	context.GetLogger(ctx).Infof("serving blob %s of type %s %d bytes long", dgst.String(), desc.MediaType, desc.Size)
	http.ServeContent(w, req, desc.Digest.String(), time.Time{}, remoteReader)
	return nil
}
// Get retrieves the manifest with digest `dgst`.
func (r *repository) Get(ctx context.Context, dgst digest.Digest) (*manifest.SignedManifest, error) {
	if _, err := r.getImageStreamImage(ctx, dgst); err != nil {
		log.Errorf("Error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		log.Errorf("Error retrieving image %s: %v", dgst.String(), err)
		return nil, err
	}

	return r.manifestFromImage(image)
}
// Get retrieves the manifest with digest `dgst`.
func (r *repository) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	if _, err := r.getImageStreamImage(dgst); err != nil {
		context.GetLogger(r.ctx).Errorf("Error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("Error retrieving image %s: %v", dgst.String(), err)
		return nil, err
	}

	ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr}
	return r.manifestFromImageWithCachedLayers(image, ref.DockerClientDefaults().Exact())
}
// Stat makes a local check for the blob, then falls through to the other servers referenced by
// the image stream and looks for those that have the layer.
func (r *pullthroughBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	// check the local store for the blob
	desc, err := r.BlobStore.Stat(ctx, dgst)
	switch {
	case err == distribution.ErrBlobUnknown:
		// continue on to the code below and look up the blob in a remote store since it is not in
		// the local store
	case err != nil:
		context.GetLogger(ctx).Errorf("Failed to find blob %q: %#v", dgst.String(), err)
		fallthrough
	default:
		return desc, err
	}

	return r.remoteStat(ctx, dgst)
}
func (t *testBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	t.calls["ServeBlob"]++
	content, exists := t.blobs[dgst]
	if !exists {
		return distribution.ErrBlobUnknown
	}

	reader := bytes.NewReader(content)
	setResponseHeaders(w, int64(len(content)), "application/octet-stream", dgst)
	http.ServeContent(w, req, dgst.String(), time.Time{}, reader)

	n, err := reader.Seek(0, 1)
	if err != nil {
		return err
	}
	t.bytesServed = n
	return nil
}