// checkBlobParentPath asserts that a directory containing blob's link or data
// does (not) exist. If repoName is given, link path in _layers directory of
// that repository will be checked. Registry's blob store will be checked
// otherwise.
func checkBlobParentPath(t *testing.T, ctx context.Context, driver *inmemory.Driver, repoName string, dgst digest.Digest, expectExistent bool) {
	var (
		blobPath string
		err      error
	)

	if repoName != "" {
		blobPath, err = pathFor(layerLinkPathSpec{name: repoName, digest: dgst})
		if err != nil {
			t.Fatalf("failed to get layer link path for repo=%s, digest=%s: %v", repoName, dgst.String(), err)
		}
		blobPath = path.Dir(blobPath)
	} else {
		blobPath, err = pathFor(blobPathSpec{digest: dgst})
		if err != nil {
			t.Fatalf("failed to get blob path for digest %s: %v", dgst.String(), err)
		}
	}

	parentExists, err := exists(ctx, driver, blobPath)
	if err != nil {
		t.Fatalf("failed to check whether path %s exists: %v", blobPath, err)
	}

	if expectExistent && !parentExists {
		t.Errorf("expected blob path %s to exist", blobPath)
	} else if !expectExistent && parentExists {
		t.Errorf("expected blob path %s not to exist", blobPath)
	}
}
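// A minimal sketch (not part of the original tests) of how checkBlobParentPath
// might be invoked: before anything is written, both the repository link path
// and the global blob path should be absent, so expectExistent is false. It
// assumes the in-memory storage driver and a throwaway digest; the repository
// name "testRepo" is illustrative only.
func checkFreshDriverHasNoBlobPaths(t *testing.T) {
	ctx := context.Background()
	driver := inmemory.New()
	dgst := digest.FromBytes([]byte("some blob content"))

	// Nothing has been written yet, so neither the repository-scoped link
	// directory nor the registry-wide blob directory should exist.
	checkBlobParentPath(t, ctx, driver, "testRepo", dgst, false)
	checkBlobParentPath(t, ctx, driver, "", dgst, false)
}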
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, picker *picker.Picker) (RootCA, error) {
	// We need a valid picker to be able to Dial to a remote CA
	if picker == nil {
		return RootCA{}, fmt.Errorf("valid remote address picker required")
	}

	// This TLS Config is intentionally using InsecureSkipVerify. Either we're
	// doing TOFU, in which case we don't validate the remote CA, or we're using
	// a user supplied hash to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecureCreds),
		grpc.WithBackoffMaxDelay(10 * time.Second),
		grpc.WithPicker(picker)}

	firstAddr, err := picker.PickAddr()
	if err != nil {
		return RootCA{}, err
	}

	conn, err := grpc.Dial(firstAddr, opts...)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %v", err)
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, fmt.Errorf("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Pool: pool}, nil
}
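// The fingerprint check inside GetRemoteCA can be exercised in isolation. This
// is a minimal sketch (not part of the original code) of how a caller might
// verify a fetched CA bundle against a pinned digest the same way GetRemoteCA
// does; verifyCABundle is a hypothetical helper name. Only digest.NewDigestVerifier,
// io.Copy and bytes.NewReader, already used above, are relied upon.
func verifyCABundle(trusted digest.Digest, fetched []byte) error {
	verifier, err := digest.NewDigestVerifier(trusted)
	if err != nil {
		return err
	}
	// Feed the fetched PEM bytes through the verifier, mirroring the io.Copy
	// call in GetRemoteCA.
	if _, err := io.Copy(verifier, bytes.NewReader(fetched)); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("remote CA does not match fingerprint %s", trusted.Hex())
	}
	return nil
}

// Example (hypothetical): err := verifyCABundle(digest.FromBytes(knownPEM), fetchedPEM)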
func hex(d digest.Digest) string {
	if d == "" {
		return ""
	}
	return d.Hex()
}
// Stat ensures that the digest is a member of the specified repository and
// forwards the descriptor request to the global blob store. If the media type
// differs for the repository, we override it.
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if err := dgst.Validate(); err != nil {
		return distribution.Descriptor{}, err
	}

	conn := rsrbds.upstream.pool.Get()
	defer conn.Close()

	// Check membership to repository first
	member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if !member {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	upstream, err := rsrbds.upstream.stat(ctx, conn, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// We allow a per repository mediatype, let's look it up here.
	mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if mediatype != "" {
		upstream.MediaType = mediatype
	}

	return upstream, nil
}
// Get retrieves the manifest with digest `dgst`.
func (r *repository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	if err := r.checkPendingErrors(ctx); err != nil {
		return nil, err
	}

	if _, err := r.getImageStreamImage(dgst); err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving image %s: %v", dgst.String(), err)
		return nil, err
	}

	ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr}
	if managed := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; managed == "true" {
		// A reference without a registry part refers to a repository containing locally managed
		// images. Such an entry is retrieved, checked and set by blobDescriptorService operating
		// only on local blobs.
		ref.Registry = ""
	} else {
		// A reference with a registry part points to a remote repository. This is used by the
		// pullthrough middleware.
		ref = ref.DockerClientDefaults().AsRepository()
	}

	manifest, err := r.manifestFromImageWithCachedLayers(image, ref.Exact())

	return manifest, err
}
func (t *mockTagAdder) AddTag(ref reference.Named, id digest.Digest, force bool) error {
	if t.refs == nil {
		t.refs = make(map[string]string)
	}
	t.refs[ref.String()] = id.String()
	return nil
}
func schema1ToImage(manifest *schema1.SignedManifest, d digest.Digest) (*api.Image, error) {
	if len(manifest.History) == 0 {
		return nil, fmt.Errorf("image has no v1Compatibility history and cannot be used")
	}
	dockerImage, err := unmarshalDockerImage([]byte(manifest.History[0].V1Compatibility))
	if err != nil {
		return nil, err
	}
	mediatype, payload, err := manifest.Payload()
	if err != nil {
		return nil, err
	}

	if len(d) > 0 {
		dockerImage.ID = d.String()
	} else {
		dockerImage.ID = digest.FromBytes(manifest.Canonical).String()
	}

	image := &api.Image{
		ObjectMeta: kapi.ObjectMeta{
			Name: dockerImage.ID,
		},
		DockerImageMetadata:          *dockerImage,
		DockerImageManifest:          string(payload),
		DockerImageManifestMediaType: mediatype,
		DockerImageMetadataVersion:   "1.0",
	}

	return image, nil
}
func schema2ToImage(manifest *schema2.DeserializedManifest, imageConfig []byte, d digest.Digest) (*api.Image, error) {
	mediatype, payload, err := manifest.Payload()
	if err != nil {
		return nil, err
	}

	dockerImage, err := unmarshalDockerImage(imageConfig)
	if err != nil {
		return nil, err
	}

	if len(d) > 0 {
		dockerImage.ID = d.String()
	} else {
		dockerImage.ID = digest.FromBytes(payload).String()
	}

	image := &api.Image{
		ObjectMeta: kapi.ObjectMeta{
			Name: dockerImage.ID,
		},
		DockerImageMetadata:          *dockerImage,
		DockerImageManifest:          string(payload),
		DockerImageConfig:            string(imageConfig),
		DockerImageManifestMediaType: mediatype,
		DockerImageMetadataVersion:   "1.0",
	}

	return image, nil
}
// Stat returns a blob descriptor if the given blob is either linked in the repository or referenced in
// the corresponding image stream. This method is invoked from inside of upstream's linkedBlobStore. It
// expects a proper repository object to be set on the given context by upper openshift middleware wrappers.
func (bs *blobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	repo, found := RepositoryFrom(ctx)
	if !found || repo == nil {
		err := fmt.Errorf("failed to retrieve repository from context")
		context.GetLogger(ctx).Error(err)
		return distribution.Descriptor{}, err
	}

	// if there is a repo layer link, return its descriptor
	desc, err := bs.BlobDescriptorService.Stat(ctx, dgst)
	if err == nil {
		// and remember the association
		repo.cachedLayers.RememberDigest(dgst, repo.blobrepositorycachettl, imageapi.DockerImageReference{
			Namespace: repo.namespace,
			Name:      repo.name,
		}.Exact())
		return desc, nil
	}

	context.GetLogger(ctx).Debugf("could not stat layer link %q in repository %q: %v", dgst.String(), repo.Named().Name(), err)

	// verify the blob is stored locally
	desc, err = dockerRegistry.BlobStatter().Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	// ensure it's referenced inside of corresponding image stream
	if imageStreamHasBlob(repo, dgst) {
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	served, err := pbs.serveLocal(ctx, w, r, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
		return err
	}

	if served {
		return nil
	}

	mu.Lock()
	_, ok := inflight[dgst]
	if ok {
		mu.Unlock()
		_, err := pbs.copyContent(ctx, dgst, w)
		return err
	}
	inflight[dgst] = struct{}{}
	mu.Unlock()

	go func(dgst digest.Digest) {
		if err := pbs.storeLocal(ctx, dgst); err != nil {
			context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
		}
		pbs.scheduler.AddBlob(dgst.String(), repositoryTTL)
	}(dgst)

	_, err = pbs.copyContent(ctx, dgst, w)
	if err != nil {
		return err
	}
	return nil
}
func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error {
	root := graph.imageRoot(id)
	if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil {
		return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err)
	}
	return nil
}
func addTestFetch(repo string, dgst digest.Digest, content []byte, m *testutil.RequestResponseMap) {
	*m = append(*m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "GET",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Body:       content,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(content))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})

	*m = append(*m, testutil.RequestResponseMapping{
		Request: testutil.Request{
			Method: "HEAD",
			Route:  "/v2/" + repo + "/blobs/" + dgst.String(),
		},
		Response: testutil.Response{
			StatusCode: http.StatusOK,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(content))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		},
	})
}
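// A sketch (not from the original tests) of how the map built by addTestFetch
// might back a canned test server. It assumes testutil.NewHandler, the handler
// constructor normally used with RequestResponseMap in these client tests; if
// the local testutil package differs, substitute its equivalent.
func newBlobTestServer(repo string, dgst digest.Digest, content []byte) *httptest.Server {
	var m testutil.RequestResponseMap
	// Register canned GET and HEAD responses for the blob.
	addTestFetch(repo, dgst, content, &m)
	return httptest.NewServer(testutil.NewHandler(m))
}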
func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
	stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
	if err != nil {
		return 0, err
	}
	return stat.Size(), nil
}
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
// Important! This function is called for GET and HEAD requests. The Docker client uses[1] a HEAD request
// to check the existence of a layer. If the layer with the digest is available, this function MUST return
// a success response with no actual body content.
// [1] https://docs.docker.com/registry/spec/api/#existing-layers
func (pbs *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := pbs.digestToStore[dgst.String()]
	if !ok {
		return pbs.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	// store the content locally if requested, but ensure only one instance at a time
	// is storing to avoid excessive local writes
	if pbs.mirror {
		mu.Lock()
		if _, ok = inflight[dgst]; ok {
			mu.Unlock()
			context.GetLogger(ctx).Infof("Serving %q while mirroring in background", dgst)
			_, err := pbs.copyContent(store, ctx, dgst, w, req)
			return err
		}
		inflight[dgst] = struct{}{}
		mu.Unlock()

		go func(dgst digest.Digest) {
			context.GetLogger(ctx).Infof("Start background mirroring of %q", dgst)
			if err := pbs.storeLocal(store, ctx, dgst); err != nil {
				context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
			}
			context.GetLogger(ctx).Infof("Completed mirroring of %q", dgst)
		}(dgst)
	}

	_, err := pbs.copyContent(store, ctx, dgst, w, req)
	return err
}
// fetch downloads the blob to a temporary file and renames it to the expected name
func (r *blobRepo) fetch(ctx context.Context, repository distribution.Repository, dgst digest.Digest) (path string, err error) {
	defer apexctx.GetLogger(ctx).WithField("digest", dgst).Trace("fetch the blob").Stop(&err)

	tempFilePath := filepath.Join(r.SpoolPath, fmt.Sprintf("%s-%d", dgst.String(), rand.Int63()))
	f, err := os.Create(tempFilePath)
	if err != nil {
		return "", err
	}
	defer f.Close()
	defer os.Remove(tempFilePath)

	blob, err := repository.Blobs(ctx).Open(ctx, dgst)
	if err != nil {
		return "", err
	}
	defer blob.Close()

	if _, err = io.Copy(f, blob); err != nil {
		return "", err
	}
	f.Close()
	blob.Close()

	resultFilePath := filepath.Join(r.SpoolPath, dgst.String())
	if err = os.Rename(tempFilePath, resultFilePath); err != nil {
		return "", err
	}

	return resultFilePath, nil
}
func (pms proxyManifestStore) Get(dgst digest.Digest) (*schema1.SignedManifest, error) {
	sm, err := pms.localManifests.Get(dgst)
	if err == nil {
		proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
		return sm, err
	}

	sm, err = pms.remoteManifests.Get(dgst)
	if err != nil {
		return nil, err
	}

	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
	err = pms.localManifests.Put(sm)
	if err != nil {
		return nil, err
	}

	// Schedule the repo for removal
	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)
	// Ensure the manifest blob is cleaned up
	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)

	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
	return sm, err
}
// SetDigest sets the digest for the image layer to the provided value.
func (graph *Graph) SetDigest(id string, dgst digest.Digest) error {
	root := graph.imageRoot(id)
	if err := ioutil.WriteFile(filepath.Join(root, "checksum"), []byte(dgst.String()), 0600); err != nil {
		return fmt.Errorf("Error storing digest in %s/checksum: %s", root, err)
	}
	return nil
}
func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string {
	// Split the digest into its algorithm and hex components.
	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()

	// The path to the target blob data looks something like:
	//   baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"
	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex)
}
func (c digestToRepositoryCache) RepositoryHasBlob(repo string, dgst digest.Digest) bool {
	value, ok := c.Get(dgst.String())
	if !ok {
		return false
	}
	repos := value.(*repositoryBucket)
	return repos.Has(repo)
}
// RepositoriesForDigest returns a list of repositories that may contain this digest.
func (c digestToRepositoryCache) RepositoriesForDigest(dgst digest.Digest) []string {
	value, ok := c.Get(dgst.String())
	if !ok {
		return nil
	}
	repos := value.(*repositoryBucket)
	return repos.Copy()
}
func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) {
	img, err := s.is.Get(id)
	if err != nil {
		return nil, err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return nil, fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	var foreignSrcs map[layer.DiffID]distribution.Descriptor
	for i := range img.RootFS.DiffIDs {
		v1Img := image.V1Image{
			Created: img.Created,
		}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return nil, err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created)
		if err != nil {
			return nil, err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
		if src.Digest != "" {
			if foreignSrcs == nil {
				foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
			}
			foreignSrcs[img.RootFS.DiffIDs[i]] = src
		}
	}

	configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return nil, err
	}
	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
		return nil, err
	}

	s.images[id].layers = layers
	return foreignSrcs, nil
}
// ForgetDigest removes an association between given digest and repository from the cache.
func (c digestToRepositoryCache) ForgetDigest(dgst digest.Digest, repo string) {
	key := dgst.String()
	value, ok := c.Peek(key)
	if !ok {
		return
	}
	repos := value.(*repositoryBucket)
	repos.Remove(repo)
}
// imageStreamHasBlob returns true if the given blob digest is referenced in the image stream corresponding
// to the given repository. If not found in the local cache, the image stream's images will be iterated and
// fetched from newest to oldest until found. Each processed image updates the local cache of blobs.
func imageStreamHasBlob(r *repository, dgst digest.Digest) bool {
	repoCacheName := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name}.Exact()
	if r.cachedLayers.RepositoryHasBlob(repoCacheName, dgst) {
		context.GetLogger(r.ctx).Debugf("found cached blob %q in repository %s", dgst.String(), r.Named().Name())
		return true
	}

	context.GetLogger(r.ctx).Debugf("verifying presence of blob %q in image stream %s/%s", dgst.String(), r.namespace, r.name)
	started := time.Now()
	logFound := func(found bool) bool {
		elapsed := time.Now().Sub(started)
		if found {
			context.GetLogger(r.ctx).Debugf("verified presence of blob %q in image stream %s/%s after %s", dgst.String(), r.namespace, r.name, elapsed.String())
		} else {
			context.GetLogger(r.ctx).Debugf("detected absence of blob %q in image stream %s/%s after %s", dgst.String(), r.namespace, r.name, elapsed.String())
		}
		return found
	}

	// verify directly with etcd
	is, err := r.getImageStream()
	if err != nil {
		context.GetLogger(r.ctx).Errorf("failed to get image stream: %v", err)
		return logFound(false)
	}

	tagEvents := []*imageapi.TagEvent{}
	event2Name := make(map[*imageapi.TagEvent]string)
	for name, eventList := range is.Status.Tags {
		for i := range eventList.Items {
			event := &eventList.Items[i]
			tagEvents = append(tagEvents, event)
			event2Name[event] = name
		}
	}
	// search from youngest to oldest
	sort.Sort(ByGeneration(tagEvents))

	processedImages := map[string]struct{}{}

	for _, tagEvent := range tagEvents {
		if _, processed := processedImages[tagEvent.Image]; processed {
			continue
		}
		if imageHasBlob(r, repoCacheName, tagEvent.Image, dgst.String(), !r.pullthrough) {
			tagName := event2Name[tagEvent]
			context.GetLogger(r.ctx).Debugf("blob found under istag %s/%s:%s in image %s", r.namespace, r.name, tagName, tagEvent.Image)
			return logFound(true)
		}
		processedImages[tagEvent.Image] = struct{}{}
	}

	context.GetLogger(r.ctx).Warnf("blob %q exists locally but is not referenced in repository %s/%s", dgst.String(), r.namespace, r.name)

	return logFound(false)
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
	route := ub.cloneRoute(RouteNameBlob)

	layerURL, err := route.URL("name", name, "digest", dgst.String())
	if err != nil {
		return "", err
	}

	return layerURL.String(), nil
}
// AddBlob schedules a blob cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddBlob(dgst digest.Digest, ttl time.Duration) error {
	ttles.Lock()
	defer ttles.Unlock()

	if ttles.stopped {
		return fmt.Errorf("scheduler not started")
	}

	ttles.add(dgst.String(), ttl, entryTypeBlob)
	return nil
}
func (m *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	m.repo.requests = append(m.repo.requests, dgst.String())
	if err, exists := m.repo.errors[dgst]; exists {
		return distribution.Descriptor{}, err
	}
	if desc, exists := m.repo.blobs[dgst]; exists {
		return desc, nil
	}
	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
// Stat retrieves the descriptor data from the redis hash entry.
func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if err := dgst.Validate(); err != nil {
		return distribution.Descriptor{}, err
	}

	conn := rbds.pool.Get()
	defer conn.Close()

	return rbds.stat(ctx, conn, dgst)
}
func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	desc, err := pbs.localStore.Stat(ctx, dgst)
	if err != nil && err != distribution.ErrBlobUnknown {
		return err
	}

	if err == nil {
		proxyMetrics.BlobPush(uint64(desc.Size))
		return pbs.localStore.ServeBlob(ctx, w, r, dgst)
	}

	desc, err = pbs.remoteStore.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
	if err != nil {
		return err
	}

	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
	if err != nil {
		return err
	}
	defer cleanup()

	if isNew {
		go func() {
			err := streamToStorage(ctx, remoteReader, desc, bw)
			if err != nil {
				context.GetLogger(ctx).Error(err)
			}

			proxyMetrics.BlobPull(uint64(desc.Size))
		}()
		err := streamToClient(ctx, w, desc, bw)
		if err != nil {
			return err
		}

		proxyMetrics.BlobPush(uint64(desc.Size))
		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
		return nil
	}

	err = streamToClient(ctx, w, desc, bw)
	if err != nil {
		return err
	}
	proxyMetrics.BlobPush(uint64(desc.Size))
	return nil
}
// RememberDigest associates a digest with a repository.
func (c digestToRepositoryCache) RememberDigest(dgst digest.Digest, repo string) {
	key := dgst.String()
	value, ok := c.Get(key)
	if !ok {
		value = &repositoryBucket{}
		if ok, _ := c.ContainsOrAdd(key, value); !ok {
			return
		}
	}
	repos := value.(*repositoryBucket)
	repos.Add(repo)
}
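// A hedged sketch (not part of the original code) tying together the cache
// methods above: RememberDigest, RepositoryHasBlob, RepositoriesForDigest and
// ForgetDigest. The cache is taken as a parameter because its constructor is
// not shown here, and the repository name "myproject/app" is illustrative only.
func demoDigestRepositoryCache(cache digestToRepositoryCache, dgst digest.Digest) {
	// Record that the repository is known to contain this digest.
	cache.RememberDigest(dgst, "myproject/app")

	// Membership and reverse lookup use the same digest key.
	if cache.RepositoryHasBlob("myproject/app", dgst) {
		fmt.Printf("repositories for %s: %v\n", dgst, cache.RepositoriesForDigest(dgst))
	}

	// Drop the association once the blob is no longer linked in the repository.
	cache.ForgetDigest(dgst, "myproject/app")
}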
func (cs *ContentStore) GetPath(dgst digest.Digest) (string, error) {
	p := filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
	if _, err := os.Stat(p); err != nil {
		if os.IsNotExist(err) {
			return "", ErrBlobNotFound
		}
		return "", err
	}

	return p, nil
}