Example no. 1
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
func (r *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := r.digestToStore[dgst.String()]
	if !ok {
		return r.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	desc, err := store.Stat(ctx, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Failed to stat digest %q: %v", dgst.String(), err)
		return err
	}

	remoteReader, err := store.Open(ctx, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Failure to open remote store for digest %q: %v", dgst.String(), err)
		return err
	}
	defer remoteReader.Close()

	setResponseHeaders(w, desc.Size, desc.MediaType, dgst)

	context.GetLogger(ctx).Infof("serving blob %s of type %s %d bytes long", dgst.String(), desc.MediaType, desc.Size)
	http.ServeContent(w, req, desc.Digest.String(), time.Time{}, remoteReader)
	return nil
}
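The snippet calls a setResponseHeaders helper that is not shown here. A minimal sketch of what it plausibly does, assuming it only fills in the standard registry blob headers (the exact header set is an assumption, not taken from the example):

func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, dgst digest.Digest) {
	// Hypothetical sketch (uses net/http and strconv): announce size, type
	// and digest before the body is written.
	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
	w.Header().Set("Content-Type", mediaType)
	w.Header().Set("Docker-Content-Digest", dgst.String())
	w.Header().Set("Etag", dgst.String())
}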
Example no. 2
// fillImageWithMetadata fills a given image with metadata and corrects its layer sizes using the blob sizes.
// Newer Docker client versions don't set layer sizes in the manifest at all, while Origin master needs
// correct layer sizes for proper image quota support. That's why the metadata has to be filled in the registry.
func (r *repository) fillImageWithMetadata(manifest *schema1.SignedManifest, image *imageapi.Image) error {
	if err := imageapi.ImageWithMetadata(image); err != nil {
		return err
	}

	layerSet := sets.NewString()
	size := int64(0)

	blobs := r.Blobs(r.ctx)
	for i := range image.DockerImageLayers {
		layer := &image.DockerImageLayers[i]
		// DockerImageLayers represents manifest.Manifest.FSLayers in reversed order
		desc, err := blobs.Stat(r.ctx, manifest.Manifest.FSLayers[len(image.DockerImageLayers)-i-1].BlobSum)
		if err != nil {
			context.GetLogger(r.ctx).Errorf("Failed to stat blobs %s of image %s", layer.Name, image.DockerImageReference)
			return err
		}
		layer.Size = desc.Size
		// count empty layer just once (empty layer may actually have non-zero size)
		if !layerSet.Has(layer.Name) {
			size += desc.Size
			layerSet.Insert(layer.Name)
		}
	}

	image.DockerImageMetadata.Size = size
	context.GetLogger(r.ctx).Infof("Total size of image %s with docker ref %s: %d", image.Name, image.DockerImageReference, size)

	return nil
}
Example no. 3
// newQuotaEnforcingConfig creates caches for quota objects. The objects are stored with the given eviction
// timeout. Caches will only be initialized if the given TTL is positive. Options are gathered from the
// configuration file and will be overridden by the enforceQuota and projectCacheTTL environment variable values.
func newQuotaEnforcingConfig(ctx context.Context, enforceQuota, projectCacheTTL string, options map[string]interface{}) *quotaEnforcingConfig {
	enforce, err := getBoolOption(EnforceQuotaEnvVar, "enforcequota", false, options)
	if err != nil {
		logrus.Error(err)
	}

	if !enforce {
		context.GetLogger(ctx).Info("quota enforcement disabled")
		return &quotaEnforcingConfig{
			enforcementDisabled:  true,
			projectCacheDisabled: true,
		}
	}

	ttl, err := getDurationOption(ProjectCacheTTLEnvVar, "projectcachettl", defaultProjectCacheTTL, options)
	if err != nil {
		logrus.Error(err)
	}

	if ttl <= 0 {
		context.GetLogger(ctx).Info("not using project caches for quota objects")
		return &quotaEnforcingConfig{
			projectCacheDisabled: true,
		}
	}

	context.GetLogger(ctx).Infof("caching project quota objects with TTL %s", ttl.String())
	return &quotaEnforcingConfig{
		limitRanges: newProjectObjectListCache(ttl),
	}
}
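getBoolOption and getDurationOption are not part of the example. A plausible sketch of the boolean helper, assuming the environment variable (when set) overrides the configuration option, as the doc comment above describes; the duration variant would follow the same shape with time.ParseDuration:

func getBoolOption(envVar, optionName string, defaultValue bool, options map[string]interface{}) (bool, error) {
	// Hypothetical sketch (uses fmt, os and strconv).
	value := defaultValue
	if v, ok := options[optionName]; ok {
		b, ok := v.(bool)
		if !ok {
			return defaultValue, fmt.Errorf("option %q: expected bool, got %T", optionName, v)
		}
		value = b
	}
	if env := os.Getenv(envVar); env != "" {
		b, err := strconv.ParseBool(env)
		if err != nil {
			return value, fmt.Errorf("%s: %v", envVar, err)
		}
		value = b
	}
	return value, nil
}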
Example no. 4
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
// Important! This function is called for GET and HEAD requests. The Docker client uses a HEAD request [1]
// to check the existence of a layer. If a layer with the given digest is available, this function MUST
// return a success response with no actual body content.
// [1] https://docs.docker.com/registry/spec/api/#existing-layers
func (pbs *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := pbs.digestToStore[dgst.String()]
	if !ok {
		return pbs.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	// store the content locally if requested, but ensure only one instance at a time
	// is storing to avoid excessive local writes
	if pbs.mirror {
		mu.Lock()
		if _, ok = inflight[dgst]; ok {
			mu.Unlock()
			context.GetLogger(ctx).Infof("Serving %q while mirroring in background", dgst)
			_, err := pbs.copyContent(store, ctx, dgst, w, req)
			return err
		}
		inflight[dgst] = struct{}{}
		mu.Unlock()

		go func(dgst digest.Digest) {
			context.GetLogger(ctx).Infof("Start background mirroring of %q", dgst)
			if err := pbs.storeLocal(store, ctx, dgst); err != nil {
				context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
			}
			context.GetLogger(ctx).Infof("Completed mirroring of %q", dgst)
		}(dgst)
	}

	_, err := pbs.copyContent(store, ctx, dgst, w, req)
	return err
}
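The mirroring branch relies on package-level mu and inflight variables shared by all requests. A minimal sketch of the declarations the snippet needs, inferred from the usage above:

var (
	// mu guards inflight.
	mu sync.Mutex
	// inflight tracks digests that are currently being mirrored, so that only
	// one goroutine at a time stores a given blob locally.
	inflight = make(map[digest.Digest]struct{})
)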
Example no. 5
// rememberLayersOfImage caches the layer digests of the given image
func (r *repository) rememberLayersOfImage(image *imageapi.Image, cacheName string) {
	if len(image.DockerImageLayers) == 0 && len(image.DockerImageManifestMediaType) > 0 && len(image.DockerImageConfig) == 0 {
		// image has no layers
		return
	}

	if len(image.DockerImageLayers) > 0 {
		for _, layer := range image.DockerImageLayers {
			r.cachedLayers.RememberDigest(digest.Digest(layer.Name), r.blobrepositorycachettl, cacheName)
		}
		// remember reference to manifest config as well for schema 2
		if image.DockerImageManifestMediaType == schema2.MediaTypeManifest && len(image.DockerImageMetadata.ID) > 0 {
			r.cachedLayers.RememberDigest(digest.Digest(image.DockerImageMetadata.ID), r.blobrepositorycachettl, cacheName)
		}
		return
	}
	mh, err := NewManifestHandlerFromImage(r, image)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("cannot remember layers of image %q: %v", image.Name, err)
		return
	}
	dgst, err := mh.Digest()
	if err != nil {
		context.GetLogger(r.ctx).Errorf("cannot get manifest digest of image %q: %v", image.Name, err)
		return
	}

	r.rememberLayersOfManifest(dgst, mh.Manifest(), cacheName)
}
Example no. 6
func (lc *cachedLayerService) Delete(dgst digest.Digest) error {
	ctxu.GetLogger(lc.ctx).Debugf("(*layerInfoCache).Delete(%q)", dgst)
	if err := lc.cache.Delete(lc.ctx, lc.repository.Name(), dgst); err != nil {
		ctxu.GetLogger(lc.ctx).Errorf("error deleting layer link from cache; repo=%s, layer=%s: %v", lc.repository.Name(), dgst, err)
	}
	return lc.LayerService.Delete(dgst)
}
Example no. 7
func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put")

	m, ok := manifestList.(*manifestlist.DeserializedManifestList)
	if !ok {
		return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList)
	}

	if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
		return "", err
	}

	mt, payload, err := m.Payload()
	if err != nil {
		return "", err
	}

	revision, err := ms.blobStore.Put(ctx, mt, payload)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
		return "", err
	}

	return revision.Digest, nil
}
Example no. 8
func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer {
	return time.AfterFunc(ttl, func() {
		ttles.Lock()
		defer ttles.Unlock()

		var f expiryFunc

		switch entry.EntryType {
		case entryTypeBlob:
			f = ttles.onBlobExpire
		case entryTypeManifest:
			f = ttles.onManifestExpire
		default:
			f = func(reference.Reference) error {
				return fmt.Errorf("unexpected scheduler entry type: %v", entry.EntryType)
			}
		}

		ref, err := reference.Parse(entry.Key)
		if err == nil {
			if err := f(ref); err != nil {
				context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
			}
		} else {
			context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
		}

		delete(ttles.entries, entry.Key)
		ttles.indexDirty = true
	})
}
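For context, the entry machinery the timer callback dispatches on could be declared roughly as follows; this is a sketch inferred from the usage above, not the verbatim upstream definitions:

// expiryFunc is invoked when an entry's TTL elapses.
type expiryFunc func(reference.Reference) error

const (
	entryTypeBlob = iota
	entryTypeManifest
)

// schedulerEntry records one scheduled expiry, keyed by a reference string.
type schedulerEntry struct {
	Key       string
	Expiry    time.Time
	EntryType int
}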
Example no. 9
// findCandidateRepository looks for a particular blob among the repositories in search, trying previously
// cached locations first
func (r *pullthroughBlobStore) findCandidateRepository(ctx context.Context, search map[string]*imageapi.DockerImageReference, cachedLayers []string, dgst digest.Digest, retriever importer.RepositoryRetriever) (distribution.Descriptor, error) {
	// no possible remote locations to search, exit early
	if len(search) == 0 {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	// see if any of the previously located repositories containing this digest are in this
	// image stream
	for _, repo := range cachedLayers {
		ref, ok := search[repo]
		if !ok {
			continue
		}
		desc, err := r.proxyStat(ctx, retriever, *ref, dgst)
		if err != nil {
			delete(search, repo)
			continue
		}
		context.GetLogger(r.repo.ctx).Infof("Found digest location from cache %q in %q: %v", dgst, repo, err)
		return desc, nil
	}

	// search the remaining registries for this digest
	for repo, ref := range search {
		desc, err := r.proxyStat(ctx, retriever, *ref, dgst)
		if err != nil {
			continue
		}
		r.repo.cachedLayers.RememberDigest(dgst, repo)
		context.GetLogger(r.repo.ctx).Infof("Found digest location by search %q in %q: %v", dgst, repo, err)
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
Example no. 10
func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	served, err := pbs.serveLocal(ctx, w, r, dgst)
	if err != nil {
		context.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
		return err
	}

	if served {
		return nil
	}

	mu.Lock()
	_, ok := inflight[dgst]
	if ok {
		mu.Unlock()
		_, err := pbs.copyContent(ctx, dgst, w)
		return err
	}
	inflight[dgst] = struct{}{}
	mu.Unlock()

	go func(dgst digest.Digest) {
		if err := pbs.storeLocal(ctx, dgst); err != nil {
			context.GetLogger(ctx).Errorf("Error committing to storage: %s", err.Error())
		}
		pbs.scheduler.AddBlob(dgst.String(), repositoryTTL)
	}(dgst)

	_, err = pbs.copyContent(ctx, dgst, w)
	return err
}
Example no. 11
func (app *App) logError(context context.Context, errors errcode.Errors) {
	for _, e1 := range errors {
		var c ctxu.Context

		switch e := e1.(type) {
		case errcode.Error:
			c = ctxu.WithValue(context, "err.code", e.Code)
			c = ctxu.WithValue(c, "err.message", e.Code.Message())
			c = ctxu.WithValue(c, "err.detail", e.Detail)
		case errcode.ErrorCode:
			c = ctxu.WithValue(context, "err.code", e)
			c = ctxu.WithValue(c, "err.message", e.Message())
		default:
			// just a plain Go 'error'
			c = ctxu.WithValue(context, "err.code", errcode.ErrorCodeUnknown)
			c = ctxu.WithValue(c, "err.message", e1.Error())
		}

		c = ctxu.WithLogger(c, ctxu.GetLogger(c,
			"err.code",
			"err.message",
			"err.detail"))
		ctxu.GetLogger(c).Errorf("An error occurred")
	}
}
Example no. 12
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	desc, err := cbds.cache.Stat(ctx, dgst)
	if err != nil {
		if err != distribution.ErrBlobUnknown {
			context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
		}

		goto fallback
	}

	if cbds.tracker != nil {
		cbds.tracker.Hit()
	}
	return desc, nil
fallback:
	if cbds.tracker != nil {
		cbds.tracker.Miss()
	}
	desc, err = cbds.backend.Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
	}

	return desc, err
}
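The goto funnels both the cache-miss and the cache-error paths into a single fallback block. For comparison, a behavior-preserving sketch of the same logic without goto:

func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	desc, err := cbds.cache.Stat(ctx, dgst)
	if err == nil {
		if cbds.tracker != nil {
			cbds.tracker.Hit()
		}
		return desc, nil
	}
	if err != distribution.ErrBlobUnknown {
		context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
	}

	// fallback: consult the backend and repopulate the cache
	if cbds.tracker != nil {
		cbds.tracker.Miss()
	}
	desc, err = cbds.backend.Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}
	if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
		context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
	}
	return desc, nil
}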
Example no. 13
func (r *errorBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
	if err := r.repo.checkPendingErrors(ctx); err != nil {
		return nil, err
	}

	ctx = WithRepository(ctx, r.repo)

	opts, err := effectiveCreateOptions(options)
	if err != nil {
		return nil, err
	}
	if err := checkPendingCrossMountErrors(ctx, opts); err != nil {
		context.GetLogger(ctx).Infof("disabling cross-repo mount because of an error: %v", err)
		options = append(options, guardCreateOptions{DisableCrossMount: true})
	} else if !opts.Mount.ShouldMount {
		options = append(options, guardCreateOptions{DisableCrossMount: true})
	} else {
		context.GetLogger(ctx).Debugf("attempting cross-repo mount")
		options = append(options, statCrossMountCreateOptions{
			ctx:      ctx,
			destRepo: r.repo,
		})
	}

	return r.store.Create(ctx, options...)
}
Example no. 14
// ServeBlob attempts to serve the requested digest onto w, using a remote proxy store if necessary.
func (r *pullthroughBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error {
	store, ok := r.digestToStore[dgst.String()]
	if !ok {
		return r.BlobStore.ServeBlob(ctx, w, req, dgst)
	}

	desc, err := store.Stat(ctx, dgst)
	if err != nil {
		context.GetLogger(r.repo.ctx).Errorf("Failed to stat digest %q: %v", dgst.String(), err)
		return err
	}

	remoteReader, err := store.Open(ctx, dgst)
	if err != nil {
		context.GetLogger(r.repo.ctx).Errorf("Failed to open remote store for digest %q: %v", dgst.String(), err)
		return err
	}
	defer remoteReader.Close()

	setResponseHeaders(w, desc.Size, desc.MediaType, dgst)

	context.GetLogger(r.repo.ctx).Infof("Copying %d bytes of type %q for %q", desc.Size, desc.MediaType, dgst.String())
	if _, err := io.CopyN(w, remoteReader, desc.Size); err != nil {
		context.GetLogger(r.repo.ctx).Errorf("Failed copying content from remote store %q: %v", dgst.String(), err)
		return err
	}
	return nil
}
Example no. 15
func (t tagService) Tag(ctx context.Context, tag string, dgst distribution.Descriptor) error {
	imageStream, err := t.repo.getImageStream()
	if err != nil {
		context.GetLogger(ctx).Errorf("error retrieving ImageStream %s/%s: %v", t.repo.namespace, t.repo.name, err)
		return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
	}

	image, err := t.repo.registryOSClient.Images().Get(dgst.Digest.String())
	if err != nil {
		context.GetLogger(ctx).Errorf("unable to get image: %s", dgst.Digest.String())
		return err
	}
	image.SetResourceVersion("")

	if !t.repo.pullthrough && !isImageManaged(image) {
		return distribution.ErrRepositoryUnknown{Name: t.repo.Named().Name()}
	}

	ism := imageapi.ImageStreamMapping{
		ObjectMeta: kapi.ObjectMeta{
			Namespace: imageStream.Namespace,
			Name:      imageStream.Name,
		},
		Tag:   tag,
		Image: *image,
	}

	err = t.repo.registryOSClient.ImageStreamMappings(imageStream.Namespace).Create(&ism)
	if quotautil.IsErrorQuotaExceeded(err) {
		context.GetLogger(ctx).Errorf("denied creating ImageStreamMapping: %v", err)
		return distribution.ErrAccessDenied
	}

	return err
}
Example no. 16
// authorized checks if the request can proceed with access to the requested
// repository. If it succeeds, the context may access the requested
// repository. An error will be returned if access is not available.
func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context, nameRequired nameRequiredFunc, customAccessRecords []auth.Access) error {
	ctxu.GetLogger(context).Debug("authorizing request")
	repo := getName(context)

	if app.accessController == nil {
		return nil // access controller is not enabled.
	}

	var accessRecords []auth.Access
	accessRecords = append(accessRecords, customAccessRecords...)

	if repo != "" {
		accessRecords = appendAccessRecords(accessRecords, r.Method, repo)
	}

	if len(accessRecords) == 0 {
		// Only allow the name not to be set on the base route.
		if nameRequired(r) {
			// For this to be properly secured, repo must always be set for a
			// resource that may make a modification. The only condition under
			// which name is not set and we still allow access is when the
			// base route is accessed. This section prevents us from making
			// that mistake elsewhere in the code, allowing any operation to
			// proceed.
			if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil {
				ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
			}
			return fmt.Errorf("forbidden: no repository name")
		}
		accessRecords = appendCatalogAccessRecord(accessRecords, r)
	}

	ctx, err := app.accessController.Authorized(context.Context, accessRecords...)
	if err != nil {
		switch err := err.(type) {
		case auth.Challenge:
			// Add the appropriate WWW-Auth header
			err.SetHeaders(w)

			if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil {
				ctxu.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
			}
		default:
			// This condition is a potential security problem either in
			// the configuration or whatever is backing the access
			// controller. Just return a bad request with no information
			// to avoid exposure. The request should not proceed.
			ctxu.GetLogger(context).Errorf("error checking authorization: %v", err)
			w.WriteHeader(http.StatusBadRequest)
		}

		return err
	}

	// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
	// should be replaced by another, rather than replacing the context on a
	// mutable object.
	context.Context = ctx
	return nil
}
Example no. 17
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. It is implemented as a convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	dgst, err := digest.FromBytes(p)
	if err != nil {
		context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p))
		return distribution.Descriptor{}, err
	}

	desc, err := bs.statter.Stat(ctx, dgst)
	if err == nil {
		// content already present
		return desc, nil
	} else if err != distribution.ErrBlobUnknown {
		context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err)
		// real error, return it
		return distribution.Descriptor{}, err
	}

	bp, err := bs.path(dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// TODO(stevvooe): Write out mediatype here, as well.

	return distribution.Descriptor{
		Size: int64(len(p)),

		// NOTE(stevvooe): The central blob store firewalls media types from
		// other users. The caller should look this up and override the value
		// for the specific repository.
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, bs.driver.PutContent(ctx, bp, p)
}
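A typical call site mirrors the manifest handlers shown elsewhere in this section; an illustrative fragment only:

// Store a small canonical payload and use the returned descriptor's digest.
revision, err := bs.Put(ctx, schema1.MediaTypeManifest, payload)
if err != nil {
	context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
	return "", err
}
return revision.Digest, nil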
Example no. 18
func filterAccessList(ctx context.Context, scope string, requestedAccessList []auth.Access) []auth.Access {
	if !strings.HasSuffix(scope, "/") {
		scope = scope + "/"
	}
	grantedAccessList := make([]auth.Access, 0, len(requestedAccessList))
	for _, access := range requestedAccessList {
		if access.Type == "repository" {
			if !strings.HasPrefix(access.Name, scope) {
				context.GetLogger(ctx).Debugf("Resource scope not allowed: %s", access.Name)
				continue
			}
		} else if access.Type == "registry" {
			if access.Name != "catalog" {
				context.GetLogger(ctx).Debugf("Unknown registry resource: %s", access.Name)
				continue
			}
			// TODO: Limit some actions to "admin" users
		} else {
			context.GetLogger(ctx).Debugf("Skipping unsupported resource type: %s", access.Type)
			continue
		}
		grantedAccessList = append(grantedAccessList, access)
	}
	return grantedAccessList
}
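To illustrate, with a scope of "openshift" a request for two repositories plus the catalog would be filtered as below (hypothetical values; auth.Access embeds a Resource with Type and Name plus an Action string, and ctx is any context in scope):

requested := []auth.Access{
	{Resource: auth.Resource{Type: "repository", Name: "openshift/ruby"}, Action: "pull"},
	{Resource: auth.Resource{Type: "repository", Name: "other/app"}, Action: "pull"},
	{Resource: auth.Resource{Type: "registry", Name: "catalog"}, Action: "*"},
}
granted := filterAccessList(ctx, "openshift", requested)
// granted keeps openshift/ruby (inside the scope) and the catalog record;
// other/app falls outside the scope and is dropped with a debug log.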
Example no. 19
// GetLayer fetches the binary data from backend storage and returns it in the
// response.
func (lh *layerHandler) GetLayer(w http.ResponseWriter, r *http.Request) {
	ctxu.GetLogger(lh).Debug("GetImageLayer")
	layers := lh.Repository.Layers()
	layer, err := layers.Fetch(lh.Digest)

	if err != nil {
		switch err := err.(type) {
		case distribution.ErrUnknownLayer:
			w.WriteHeader(http.StatusNotFound)
			lh.Errors.Push(v2.ErrorCodeBlobUnknown, err.FSLayer)
		default:
			lh.Errors.Push(v2.ErrorCodeUnknown, err)
		}
		return
	}

	handler, err := layer.Handler(r)
	if err != nil {
		ctxu.GetLogger(lh).Debugf("unexpected error getting layer HTTP handler: %s", err)
		lh.Errors.Push(v2.ErrorCodeUnknown, err)
		return
	}

	handler.ServeHTTP(w, r)
}
Example no. 20
// GetByTag retrieves the named manifest with the provided tag
func (r *repository) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*schema1.SignedManifest, error) {
	for _, opt := range options {
		if err := opt(r); err != nil {
			return nil, err
		}
	}
	imageStreamTag, err := r.getImageStreamTag(tag)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("Error getting ImageStreamTag %q: %v", tag, err)
		return nil, err
	}
	image := &imageStreamTag.Image

	dgst, err := digest.ParseDigest(imageStreamTag.Image.Name)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("Error parsing digest %q: %v", imageStreamTag.Image.Name, err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("Error getting image %q: %v", dgst.String(), err)
		return nil, err
	}

	return r.manifestFromImage(image)
}
Example no. 21
func verifyImageStreamAccess(ctx context.Context, namespace, imageRepo, verb string, client client.LocalSubjectAccessReviewsNamespacer) error {
	sar := authorizationapi.LocalSubjectAccessReview{
		Action: authorizationapi.Action{
			Verb:         verb,
			Group:        imageapi.GroupName,
			Resource:     "imagestreams/layers",
			ResourceName: imageRepo,
		},
	}
	response, err := client.LocalSubjectAccessReviews(namespace).Create(&sar)

	if err != nil {
		context.GetLogger(ctx).Errorf("OpenShift client error: %s", err)
		if kerrors.IsUnauthorized(err) || kerrors.IsForbidden(err) {
			return ErrOpenShiftAccessDenied
		}
		return err
	}

	if !response.Allowed {
		context.GetLogger(ctx).Errorf("OpenShift access denied: %s", response.Reason)
		return ErrOpenShiftAccessDenied
	}

	return nil
}
Example no. 22
// Get retrieves the manifest with digest `dgst`.
func (r *repository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
	if err := r.checkPendingErrors(ctx); err != nil {
		return nil, err
	}

	if _, err := r.getImageStreamImage(dgst); err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving ImageStreamImage %s/%s@%s: %v", r.namespace, r.name, dgst.String(), err)
		return nil, err
	}

	image, err := r.getImage(dgst)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("error retrieving image %s: %v", dgst.String(), err)
		return nil, err
	}

	ref := imageapi.DockerImageReference{Namespace: r.namespace, Name: r.name, Registry: r.registryAddr}
	if managed := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; managed == "true" {
		// A repository reference without a registry part refers to a repository containing locally managed
		// images. Such an entry is retrieved, checked and set by the blobDescriptorService, which operates
		// only on local blobs.
		ref.Registry = ""
	} else {
		// A reference with a registry part points to a remote repository. This is used by the pullthrough middleware.
		ref = ref.DockerClientDefaults().AsRepository()
	}

	return r.manifestFromImageWithCachedLayers(image, ref.Exact())
}
Example no. 23
func (ms *signedManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
	context.GetLogger(ms.ctx).Debug("(*signedManifestHandler).Put")

	sm, ok := manifest.(*schema1.SignedManifest)
	if !ok {
		return "", fmt.Errorf("non-schema1 manifest put to signedManifestHandler: %T", manifest)
	}

	if err := ms.verifyManifest(ms.ctx, *sm, skipDependencyVerification); err != nil {
		return "", err
	}

	mt := schema1.MediaTypeManifest
	payload := sm.Canonical

	revision, err := ms.blobStore.Put(ctx, mt, payload)
	if err != nil {
		context.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
		return "", err
	}

	// Link the revision into the repository.
	if err := ms.blobStore.linkBlob(ctx, revision); err != nil {
		return "", err
	}

	return revision.Digest, nil
}
Example no. 24
// Stat returns a blob descriptor if the given blob is either linked in the repository or referenced in the
// corresponding image stream. This method is invoked from inside of upstream's linkedBlobStore. It expects
// a proper repository object to be set on the given context by upper OpenShift middleware wrappers.
func (bs *blobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	repo, found := RepositoryFrom(ctx)
	if !found || repo == nil {
		err := fmt.Errorf("failed to retrieve repository from context")
		context.GetLogger(ctx).Error(err)
		return distribution.Descriptor{}, err
	}

	// if there is a repo layer link, return its descriptor
	desc, err := bs.BlobDescriptorService.Stat(ctx, dgst)
	if err == nil {
		// and remember the association
		repo.cachedLayers.RememberDigest(dgst, repo.blobrepositorycachettl, imageapi.DockerImageReference{
			Namespace: repo.namespace,
			Name:      repo.name,
		}.Exact())
		return desc, nil
	}

	context.GetLogger(ctx).Debugf("could not stat layer link %q in repository %q: %v", dgst.String(), repo.Named().Name(), err)

	// verify the blob is stored locally
	desc, err = dockerRegistry.BlobStatter().Stat(ctx, dgst)
	if err != nil {
		return desc, err
	}

	// ensure it's referenced inside of corresponding image stream
	if imageStreamHasBlob(repo, dgst) {
		return desc, nil
	}

	return distribution.Descriptor{}, distribution.ErrBlobUnknown
}
Example no. 25
// newQuotaEnforcingConfig creates caches for quota objects. The objects are stored with the given eviction
// timeout. Caches will only be initialized if the given TTL is positive. Options are gathered from the
// configuration file and will be overridden by the enforceQuota and projectCacheTTL environment variable values.
func newQuotaEnforcingConfig(ctx context.Context, enforceQuota, projectCacheTTL string, options map[string]interface{}) *quotaEnforcingConfig {
	buildOptionValues := func(optionName string, override string) []string {
		optValues := []string{}
		if value, ok := options[optionName]; ok {
			var res string
			switch v := value.(type) {
			case string:
				res = v
			case bool:
				res = fmt.Sprintf("%t", v)
			default:
				res = fmt.Sprintf("%v", v)
			}
			if len(res) > 0 {
				optValues = append(optValues, res)
			}
		}
		if len(override) > 0 {
			optValues = append(optValues, override)
		}
		return optValues
	}

	enforce := false
	for _, s := range buildOptionValues("enforcequota", enforceQuota) {
		enforce = s == "true"
	}
	if !enforce {
		context.GetLogger(ctx).Info("quota enforcement disabled")
		return &quotaEnforcingConfig{
			enforcementDisabled:  true,
			projectCacheDisabled: true,
		}
	}

	ttl := defaultProjectCacheTTL
	for _, s := range buildOptionValues("projectcachettl", projectCacheTTL) {
		parsed, err := time.ParseDuration(s)
		if err != nil {
			logrus.Errorf("failed to parse project cache ttl %q: %v", s, err)
			continue
		}
		ttl = parsed
	}

	if ttl <= 0 {
		context.GetLogger(ctx).Info("not using project caches for quota objects")
		return &quotaEnforcingConfig{
			projectCacheDisabled: true,
		}
	}

	context.GetLogger(ctx).Infof("caching project quota objects with TTL %s", ttl.String())
	return &quotaEnforcingConfig{
		limitRanges: newProjectObjectListCache(ttl),
	}
}
Example no. 26
func (bs *quotaRestrictedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	context.GetLogger(ctx).Debug("(*quotaRestrictedBlobStore).Put: starting")

	if err := admitBlobWrite(ctx, bs.repo); err != nil {
		context.GetLogger(ctx).Error(err.Error())
		return distribution.Descriptor{}, err
	}

	return bs.BlobStore.Put(ctx, mediaType, p)
}
Example no. 27
// imageHasBlob returns true if the image identified by imageName refers to the given blob. The image is
// fetched. If requireManaged is true and the image is not managed (it refers to a remote registry), the
// image will not be processed. A fetched image updates the local cache of blobs -> repositories with
// (blobDigest, cacheName) pairs.
func imageHasBlob(
	r *repository,
	cacheName,
	imageName,
	blobDigest string,
	requireManaged bool,
) bool {
	context.GetLogger(r.ctx).Debugf("getting image %s", imageName)
	image, err := r.getImage(digest.Digest(imageName))
	if err != nil {
		if kerrors.IsNotFound(err) {
			context.GetLogger(r.ctx).Debugf("image %q not found: imageName")
		} else {
			context.GetLogger(r.ctx).Errorf("failed to get image: %v", err)
		}
		return false
	}

	// when pullthrough is disabled, the client won't be able to download a blob belonging to an unmanaged
	// image (an image stored in an external registry), so don't consider such images as candidates
	if managed := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; requireManaged && managed != "true" {
		context.GetLogger(r.ctx).Debugf("skipping unmanaged image")
		return false
	}

	if len(image.DockerImageLayers) == 0 {
		if len(image.DockerImageManifestMediaType) > 0 {
			// If the media type is set, we can safely assume that the best effort to fill the image layers
			// has already been done. There are none.
			return false
		}
		err = imageapi.ImageWithMetadata(image)
		if err != nil {
			context.GetLogger(r.ctx).Errorf("failed to get metadata for image %s: %v", imageName, err)
			return false
		}
	}

	for _, layer := range image.DockerImageLayers {
		if layer.Name == blobDigest {
			// remember all the layers of matching image
			r.rememberLayersOfImage(image, cacheName)
			return true
		}
	}

	// only manifest V2 schema2 has docker image config filled where dockerImage.Metadata.id is its digest
	if len(image.DockerImageConfig) > 0 && image.DockerImageMetadata.ID == blobDigest {
		// remember manifest config reference of schema 2 as well
		r.rememberLayersOfImage(image, cacheName)
		return true
	}

	return false
}
Example no. 28
// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
	if config.Log.Level == "" && config.Log.Formatter == "" {
		// If no config for logging is set, fallback to deprecated "Loglevel".
		log.SetLevel(logLevel(config.Loglevel))
		ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))
		return ctx, nil
	}

	log.SetLevel(logLevel(config.Log.Level))

	formatter := config.Log.Formatter
	if formatter == "" {
		formatter = "text" // default formatter
	}

	switch formatter {
	case "json":
		log.SetFormatter(&log.JSONFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "text":
		log.SetFormatter(&log.TextFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	case "logstash":
		log.SetFormatter(&logstash.LogstashFormatter{
			TimestampFormat: time.RFC3339Nano,
		})
	default:
		// just let the library use default on empty string.
		if config.Log.Formatter != "" {
			return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
		}
	}

	if config.Log.Formatter != "" {
		log.Debugf("using %q logging formatter", config.Log.Formatter)
	}

	// log the application version with messages
	ctx = context.WithLogger(ctx, context.GetLogger(ctx, "version"))

	if len(config.Log.Fields) > 0 {
		// build up the static fields, if present.
		var fields []interface{}
		for k := range config.Log.Fields {
			fields = append(fields, k)
		}

		ctx = context.WithValues(ctx, config.Log.Fields)
		ctx = context.WithLogger(ctx, context.GetLogger(ctx, fields...))
	}

	return ctx, nil
}
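A quick illustration of the two configuration paths the function distinguishes (field names taken from the snippet itself; the concrete values are made up):

// Structured Log section: level, formatter, and static fields attached to every line.
cfg := &configuration.Configuration{}
cfg.Log.Level = "debug"
cfg.Log.Formatter = "json"
cfg.Log.Fields = map[string]interface{}{"service": "registry"}
ctx, err := configureLogging(context.Background(), cfg)
if err != nil {
	log.Fatalf("logging setup failed: %v", err)
}
context.GetLogger(ctx).Info("structured logging configured")

// Legacy path: only the deprecated top-level Loglevel is set, so the function
// takes the early-return branch at the top.
if _, err := configureLogging(context.Background(), &configuration.Configuration{Loglevel: "info"}); err != nil {
	log.Fatalf("legacy logging setup failed: %v", err)
}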
Example no. 29
func (bw *quotaRestrictedBlobWriter) Commit(ctx context.Context, provisional distribution.Descriptor) (canonical distribution.Descriptor, err error) {
	context.GetLogger(ctx).Debug("(*quotaRestrictedBlobWriter).Commit: starting")

	if err := admitBlobWrite(ctx, bw.repo); err != nil {
		context.GetLogger(ctx).Error(err.Error())
		return distribution.Descriptor{}, err
	}

	return bw.BlobWriter.Commit(ctx, provisional)
}
Example no. 30
func (app *App) logError(context context.Context, errors v2.Errors) {
	for _, e := range errors.Errors {
		c := ctxu.WithValue(context, "err.code", e.Code)
		c = ctxu.WithValue(c, "err.message", e.Message)
		c = ctxu.WithValue(c, "err.detail", e.Detail)
		c = ctxu.WithLogger(c, ctxu.GetLogger(c,
			"err.code",
			"err.message",
			"err.detail"))
		ctxu.GetLogger(c).Errorf("An error occurred")
	}
}