func (pms proxyManifestStore) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
	sm, err := pms.localManifests.Get(dgst)
	if err == nil {
		proxyMetrics.ManifestPush(uint64(len(sm.Raw)))
		return sm, err
	}

	sm, err = pms.remoteManifests.Get(dgst)
	if err != nil {
		return nil, err
	}

	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
	err = pms.localManifests.Put(sm)
	if err != nil {
		return nil, err
	}

	// Schedule the repo for removal
	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)

	// Ensure the manifest blob is cleaned up
	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)

	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))

	return sm, err
}
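The Get method above is a read-through (pull-through) cache: it first tries the local manifest store, falls back to the remote on a miss, writes the fetched manifest back locally, and schedules the cached entries for TTL-based expiry. A minimal, self-contained sketch of that control flow, with all names illustrative and not taken from the distribution codebase:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for distribution's "unknown manifest/blob" errors in
// this sketch.
var errNotFound = errors.New("not found")

// getThrough mirrors the flow of proxyManifestStore.Get: local hit, otherwise
// remote fetch followed by a local write-back.
func getThrough(local, remote map[string][]byte, key string) ([]byte, error) {
	if v, ok := local[key]; ok {
		return v, nil // served from the local cache
	}

	v, ok := remote[key]
	if !ok {
		return nil, errNotFound
	}

	// Write back so the next request is a local hit; the real code also
	// schedules the cached entry for TTL-based removal at this point.
	local[key] = v
	return v, nil
}

func main() {
	local := map[string][]byte{}
	remote := map[string][]byte{"sha256:abc": []byte("manifest payload")}

	payload, err := getThrough(local, remote, "sha256:abc")
	fmt.Println(string(payload), err, len(local))
}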
Example #2
// Stat ensures that the digest is a member of the specified repository and
// forwards the descriptor request to the global blob store. If the media type
// differs for the repository, we override it.
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if err := dgst.Validate(); err != nil {
		return distribution.Descriptor{}, err
	}

	conn := rsrbds.upstream.pool.Get()
	defer conn.Close()

	// Check membership in the repository first
	member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if !member {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	upstream, err := rsrbds.upstream.stat(ctx, conn, dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// We allow a per-repository mediatype; look it up here.
	mediatype, err := redis.String(conn.Do("HGET", rsrbds.blobDescriptorHashKey(dgst), "mediatype"))
	if err != nil {
		return distribution.Descriptor{}, err
	}

	if mediatype != "" {
		upstream.MediaType = mediatype
	}

	return upstream, nil
}
Example #3
func (graph *Graph) setLayerDigest(id string, dgst digest.Digest) error {
	root := graph.imageRoot(id)
	if err := ioutil.WriteFile(filepath.Join(root, digestFileName), []byte(dgst.String()), 0600); err != nil {
		return fmt.Errorf("Error storing digest in %s/%s: %s", root, digestFileName, err)
	}
	return nil
}
Example #4
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(name string, dgst digest.Digest) (string, error) {
	route := ub.cloneRoute(RouteNameBlob)

	layerURL, err := route.URL("name", name, "digest", dgst.String())
	if err != nil {
		return "", err
	}

	return layerURL.String(), nil
}
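A hedged usage sketch for the builder above. It assumes the NewURLBuilderFromString constructor from the same registry/api/v2 package (its signature has changed across releases) and import paths matching the era of these examples; the repository name and digest are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// Assumed constructor: builds a URLBuilder rooted at the registry base URL.
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com")
	if err != nil {
		log.Fatal(err)
	}

	// sha256 of the empty string, used purely as an example digest.
	dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	blobURL, err := ub.BuildBlobURL("library/ubuntu", dgst)
	if err != nil {
		log.Fatal(err)
	}

	// Expected shape: https://registry.example.com/v2/library/ubuntu/blobs/sha256:<hex>
	fmt.Println(blobURL)
}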
Example #5
// Stat retrieves the descriptor data from the redis hash entry.
func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if err := dgst.Validate(); err != nil {
		return distribution.Descriptor{}, err
	}

	conn := rbds.pool.Get()
	defer conn.Close()

	return rbds.stat(ctx, conn, dgst)
}
Example #6
func (pbs proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
	desc, err := pbs.localStore.Stat(ctx, dgst)
	if err != nil && err != distribution.ErrBlobUnknown {
		return err
	}

	if err == nil {
		proxyMetrics.BlobPush(uint64(desc.Size))
		return pbs.localStore.ServeBlob(ctx, w, r, dgst)
	}

	desc, err = pbs.remoteStore.Stat(ctx, dgst)
	if err != nil {
		return err
	}

	remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
	if err != nil {
		return err
	}

	bw, isNew, cleanup, err := getOrCreateBlobWriter(ctx, pbs.localStore, desc)
	if err != nil {
		return err
	}
	defer cleanup()

	if isNew {
		go func() {
			err := streamToStorage(ctx, remoteReader, desc, bw)
			if err != nil {
				context.GetLogger(ctx).Error(err)
			}

			proxyMetrics.BlobPull(uint64(desc.Size))
		}()
		err := streamToClient(ctx, w, desc, bw)
		if err != nil {
			return err
		}

		proxyMetrics.BlobPush(uint64(desc.Size))
		pbs.scheduler.AddBlob(dgst.String(), blobTTL)
		return nil
	}

	err = streamToClient(ctx, w, desc, bw)
	if err != nil {
		return err
	}
	proxyMetrics.BlobPush(uint64(desc.Size))
	return nil
}
Example #7
// SetDescriptor sets the descriptor data for the given digest using a redis
// hash. A hash is used here since we may store unrelated fields about a layer
// in the future.
func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	if err := dgst.Validate(); err != nil {
		return err
	}

	if err := cache.ValidateDescriptor(desc); err != nil {
		return err
	}

	conn := rbds.pool.Get()
	defer conn.Close()

	return rbds.setDescriptor(ctx, conn, dgst, desc)
}
Example #8
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// 	<algorithm>/<hex digest>
//
// Most importantly, for tarsum, the layout looks like this:
//
// 	tarsum/<version>/<digest algorithm>/<full digest>
//
// If multilevel is true, the first two hex characters of the digest are used
// as an additional directory level. It will be as follows:
//
// 	<algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
	hex := dgst.Hex()
	prefix := []string{algorithm}

	var suffix []string

	if multilevel {
		suffix = append(suffix, hex[:2])
	}

	suffix = append(suffix, hex)

	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
		// We have a tarsum!
		version := tsi.Version
		if version == "" {
			version = "v0"
		}

		prefix = []string{
			"tarsum",
			version,
			tsi.Algorithm,
		}
	}

	return append(prefix, suffix...), nil
}
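To make the layouts in the doc comment concrete, here is a test-style sketch. It is illustrative only: digestPathComponents is unexported, so the sketch assumes it lives in the same package (named storage here), and the digest is the sha256 of the empty string:

package storage

import (
	"path"
	"testing"

	"github.com/docker/distribution/digest"
)

// TestDigestPathComponentsSketch is not part of the original test suite; it
// just demonstrates the two layouts described above.
func TestDigestPathComponentsSketch(t *testing.T) {
	dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")

	flat, err := digestPathComponents(dgst, false)
	if err != nil {
		t.Fatal(err)
	}
	// => sha256/e3b0c442...b855
	t.Log(path.Join(flat...))

	multi, err := digestPathComponents(dgst, true)
	if err != nil {
		t.Fatal(err)
	}
	// => sha256/e3/e3b0c442...b855
	t.Log(path.Join(multi...))
}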
Example #9
func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	if err := dgst.Validate(); err != nil {
		return err
	}

	if err := cache.ValidateDescriptor(desc); err != nil {
		return err
	}

	mbdc.mu.Lock()
	defer mbdc.mu.Unlock()

	mbdc.descriptors[dgst] = desc
	return nil
}
Example #10
func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	if err := dgst.Validate(); err != nil {
		return distribution.Descriptor{}, err
	}

	mbdc.mu.RLock()
	defer mbdc.mu.RUnlock()

	desc, ok := mbdc.descriptors[dgst]
	if !ok {
		return distribution.Descriptor{}, distribution.ErrBlobUnknown
	}

	return desc, nil
}
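For context, a minimal sketch of the struct the two methods above operate on: a plain map keyed by digest, guarded by a sync.RWMutex so Stat can take a read lock while SetDescriptor takes a write lock. The package name and constructor here are assumptions, not copied from the original source:

package memory

import (
	"sync"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
)

// mapBlobDescriptorCache sketches the in-memory backing store assumed by the
// Stat and SetDescriptor methods above.
type mapBlobDescriptorCache struct {
	descriptors map[digest.Digest]distribution.Descriptor
	mu          sync.RWMutex
}

// newMapBlobDescriptorCache is an illustrative constructor; the map must be
// initialized before SetDescriptor stores anything in it.
func newMapBlobDescriptorCache() *mapBlobDescriptorCache {
	return &mapBlobDescriptorCache{
		descriptors: make(map[digest.Digest]distribution.Descriptor),
	}
}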
Example #11
func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	_, err := imbdcp.Stat(ctx, dgst)
	if err == distribution.ErrBlobUnknown {

		if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
			// if the digests differ, set the other canonical mapping
			if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil {
				return err
			}
		}

		// unknown, just set it
		return imbdcp.global.SetDescriptor(ctx, dgst, desc)
	}

	// we already know it, do nothing
	return err
}
Example #12
// MakeImageConfig returns immutable configuration JSON for image based on the
// v1Compatibility object, layer digest and parent StrongID. SHA256() of this
// config is the new image ID (strongID).
func MakeImageConfig(v1Compatibility []byte, layerID, parentID digest.Digest) ([]byte, error) {

	// Detect images created after 1.8.3
	img, err := NewImgJSON(v1Compatibility)
	if err != nil {
		return nil, err
	}
	useFallback := version.Version(img.DockerVersion).LessThan(noFallbackMinVersion)

	if useFallback {
		// Fallback for pre-1.8.3. Calculate base config based on Image struct
		// so that fields with default values added by Docker will use same ID
		logrus.Debugf("Using fallback hash for %v", layerID)

		v1Compatibility, err = json.Marshal(img)
		if err != nil {
			return nil, err
		}
	}

	var c map[string]*json.RawMessage
	if err := json.Unmarshal(v1Compatibility, &c); err != nil {
		return nil, err
	}

	if err := layerID.Validate(); err != nil {
		return nil, fmt.Errorf("invalid layerID: %v", err)
	}

	c["layer_id"] = rawJSON(layerID)

	if parentID != "" {
		if err := parentID.Validate(); err != nil {
			return nil, fmt.Errorf("invalid parentID %v", err)
		}
		c["parent_id"] = rawJSON(parentID)
	}

	delete(c, "id")
	delete(c, "parent")
	delete(c, "Size") // Size is calculated from data on disk and is inconsitent

	return json.Marshal(c)
}
Example #13
// Clear removes the descriptor from the cache and forwards to the upstream descriptor store
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
	if err := dgst.Validate(); err != nil {
		return err
	}

	conn := rsrbds.upstream.pool.Get()
	defer conn.Close()

	// Check membership in the repository first
	member, err := redis.Bool(conn.Do("SISMEMBER", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst))
	if err != nil {
		return err
	}

	if !member {
		return distribution.ErrBlobUnknown
	}

	return rsrbds.upstream.Clear(ctx, dgst)
}
Example #14
func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	if err := dgst.Validate(); err != nil {
		return err
	}

	if err := cache.ValidateDescriptor(desc); err != nil {
		return err
	}

	if dgst != desc.Digest {
		if dgst.Algorithm() == desc.Digest.Algorithm() {
			return fmt.Errorf("redis cache: digest for descriptors differ but algorthim does not: %q != %q", dgst, desc.Digest)
		}
	}

	conn := rsrbds.upstream.pool.Get()
	defer conn.Close()

	return rsrbds.setDescriptor(ctx, conn, dgst, desc)
}
Example #15
func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
	if err := dgst.Validate(); err != nil {
		return err
	}

	conn := rbds.pool.Get()
	defer conn.Close()

	// Not atomic in redis <= 2.3
	reply, err := conn.Do("HDEL", rbds.blobDescriptorHashKey(dgst), "digest", "length", "mediatype")
	if err != nil {
		return err
	}

	if reply == 0 {
		return distribution.ErrBlobUnknown
	}

	return nil
}
Example #16
func (ms *manifests) Delete(dgst digest.Digest) error {
	u, err := ms.ub.BuildManifestURL(ms.name, dgst.String())
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", u, nil)
	if err != nil {
		return err
	}

	resp, err := ms.client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if SuccessStatus(resp.StatusCode) {
		return nil
	}
	return handleErrorResponse(resp)
}
Example #17
func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, conn redis.Conn, dgst digest.Digest, desc distribution.Descriptor) error {
	if _, err := conn.Do("SADD", rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst); err != nil {
		return err
	}

	if err := rsrbds.upstream.setDescriptor(ctx, conn, dgst, desc); err != nil {
		return err
	}

	// Override repository mediatype.
	if _, err := conn.Do("HSET", rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType); err != nil {
		return err
	}

	// Also set the values for the primary descriptor, if they differ by
	// algorithm (i.e. sha256 vs tarsum).
	if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
		if err := rsrbds.setDescriptor(ctx, conn, desc.Digest, desc); err != nil {
			return err
		}
	}

	return nil
}
Example #18
func (pms proxyManifestStore) GetByTag(tag string, options ...distribution.ManifestServiceOption) (*manifest.SignedManifest, error) {
	var localDigest digest.Digest

	localManifest, err := pms.localManifests.GetByTag(tag, options...)
	switch err.(type) {
	case distribution.ErrManifestUnknown, distribution.ErrManifestUnknownRevision:
		goto fromremote
	case nil:
		break
	default:
		return nil, err
	}

	localDigest, err = manifestDigest(localManifest)
	if err != nil {
		return nil, err
	}

fromremote:
	var sm *manifest.SignedManifest
	sm, err = pms.remoteManifests.GetByTag(tag, client.AddEtagToTag(tag, localDigest.String()))
	if err != nil {
		return nil, err
	}

	if sm == nil {
		context.GetLogger(pms.ctx).Debugf("Local manifest for %q is latest, dgst=%s", tag, localDigest.String())
		return localManifest, nil
	}
	context.GetLogger(pms.ctx).Debugf("Updated manifest for %q, dgst=%s", tag, localDigest.String())

	err = pms.localManifests.Put(sm)
	if err != nil {
		return nil, err
	}

	dgst, err := manifestDigest(sm)
	if err != nil {
		return nil, err
	}
	pms.scheduler.AddBlob(dgst.String(), repositoryTTL)
	pms.scheduler.AddManifest(pms.repositoryName, repositoryTTL)

	proxyMetrics.ManifestPull(uint64(len(sm.Raw)))
	proxyMetrics.ManifestPush(uint64(len(sm.Raw)))

	return sm, err
}
Example #19
func setResponseHeaders(w http.ResponseWriter, length int64, mediaType string, digest digest.Digest) {
	w.Header().Set("Content-Length", strconv.FormatInt(length, 10))
	w.Header().Set("Content-Type", mediaType)
	w.Header().Set("Docker-Content-Digest", digest.String())
	w.Header().Set("Etag", digest.String())
}
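Because setResponseHeaders is unexported, any caller would live in the same package. The handler below is a hedged sketch (package name and function are illustrative) showing how a descriptor's fields map onto the response headers:

package handlers

import (
	"net/http"

	"github.com/docker/distribution"
)

// serveDescriptorHead answers a HEAD-style request for a blob: it writes the
// length, media type, digest, and ETag headers via setResponseHeaders and
// sends no body.
func serveDescriptorHead(w http.ResponseWriter, desc distribution.Descriptor) {
	setResponseHeaders(w, desc.Size, desc.MediaType, desc.Digest)
	w.WriteHeader(http.StatusOK)
}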
Example #20
// loadManifest loads a manifest from a byte array and verifies its content.
// The signature must be verified or an error is returned. If the manifest
// contains no signatures by a trusted key for the name in the manifest, the
// image is not considered verified. The parsed manifest object and a boolean
// indicating whether the manifest is verified are returned.
func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
	sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
	if err != nil {
		return nil, false, fmt.Errorf("error parsing payload: %s", err)
	}

	keys, err := sig.Verify()
	if err != nil {
		return nil, false, fmt.Errorf("error verifying payload: %s", err)
	}

	payload, err := sig.Payload()
	if err != nil {
		return nil, false, fmt.Errorf("error retrieving payload: %s", err)
	}

	var manifestDigest digest.Digest

	if dgst != "" {
		manifestDigest, err = digest.ParseDigest(dgst)
		if err != nil {
			return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err)
		}

		dgstVerifier, err := digest.NewDigestVerifier(manifestDigest)
		if err != nil {
			return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err)
		}

		dgstVerifier.Write(payload)

		if !dgstVerifier.Verified() {
			computedDigest, _ := digest.FromBytes(payload)
			return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest)
		}
	}

	if utils.DigestReference(ref) && ref != manifestDigest.String() {
		return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref)
	}

	var manifest registry.ManifestData
	if err := json.Unmarshal(payload, &manifest); err != nil {
		return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
	}
	if manifest.SchemaVersion != 1 {
		return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
	}

	var verified bool
	for _, key := range keys {
		job := eng.Job("trust_key_check")
		b, err := key.MarshalJSON()
		if err != nil {
			return nil, false, fmt.Errorf("error marshalling public key: %s", err)
		}
		namespace := manifest.Name
		if namespace[0] != '/' {
			namespace = "/" + namespace
		}
		stdoutBuffer := bytes.NewBuffer(nil)

		job.Args = append(job.Args, namespace)
		job.Setenv("PublicKey", string(b))
		// Check key has read/write permission (0x03)
		job.SetenvInt("Permission", 0x03)
		job.Stdout.Add(stdoutBuffer)
		if err = job.Run(); err != nil {
			return nil, false, fmt.Errorf("error running key check: %s", err)
		}
		result := engine.Tail(stdoutBuffer, 1)
		log.Debugf("Key check result: %q", result)
		if result == "verified" {
			verified = true
		}
	}

	return &manifest, verified, nil
}
Example #21
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if desc.Digest == "" {
		// if no descriptors are provided, we have nothing to validate
		// against. We don't really want to support this for the registry.
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Reason: fmt.Errorf("cannot validate against empty digest"),
		}
	}

	// Stat the on disk file
	if fi, err := bw.bufferedFileWriter.driver.Stat(ctx, bw.path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): We really don't care if the file is
			// not actually present for the reader. We now assume
			// that the desc length is zero.
			desc.Size = 0
		default:
			// Any other error we want propagated up the stack.
			return distribution.Descriptor{}, err
		}
	} else {
		if fi.IsDir() {
			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
		}

		bw.size = fi.Size()
	}

	if desc.Size > 0 {
		if desc.Size != bw.size {
			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
		}
	} else {
		// if provided 0 or negative length, we can assume caller doesn't know or
		// care about length.
		desc.Size = bw.size
	}

	// TODO(stevvooe): This section is very meandering. Need to be broken down
	// to be a lot more clear.

	if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
		canonical = bw.digester.Digest()

		if canonical.Algorithm() == desc.Digest.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = desc.Digest == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else if err == errResumableDigestNotAvailable {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	} else {
		return distribution.Descriptor{}, err
	}

	if fullHash {
		// a fantastic optimization: if the written data and the size are
		// the same, we don't need to read the data from the backend. This is
		// because we've written the entire file in the lifecycle of the
		// current instance.
		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
			canonical = bw.digester.Digest()
			verified = desc.Digest == canonical
		}

		// If the check based on size fails, we fall back to the slowest of
		// paths. We may be able to make the size-based check a stronger
		// guarantee, so this may be defensive.
		if !verified {
			digester := digest.Canonical.New()

			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
			if err != nil {
				return distribution.Descriptor{}, err
			}

			// Read the file from the backend driver and validate it.
			fr, err := newFileReader(ctx, bw.bufferedFileWriter.driver, bw.path, desc.Size)
			if err != nil {
				return distribution.Descriptor{}, err
			}

			tr := io.TeeReader(fr, digester.Hash())

			if _, err := io.Copy(digestVerifier, tr); err != nil {
				return distribution.Descriptor{}, err
			}

			canonical = digester.Digest()
			verified = digestVerifier.Verified()
		}
	}

	if !verified {
		context.GetLoggerWithFields(ctx,
			map[string]interface{}{
				"canonical": canonical,
				"provided":  desc.Digest,
			}, "canonical", "provided").
			Errorf("canonical digest does match provided digest")
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Digest: desc.Digest,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	// update desc with canonical hash
	desc.Digest = canonical

	if desc.MediaType == "" {
		desc.MediaType = "application/octet-stream"
	}

	return desc, nil
}
Example #22
func (ms *manifests) Get(dgst digest.Digest) (*manifest.SignedManifest, error) {
	// Call the by-tag endpoint, since the API uses the same URL
	// for both tags and digests.
	return ms.GetByTag(dgst.String())
}
Example #23
func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
	return "blobs::" + dgst.String()
}
Example #24
func (ms *manifests) Exists(dgst digest.Digest) (bool, error) {
	// Call the by-tag endpoint, since the API uses the same URL
	// for both tags and digests.
	return ms.ExistsByTag(dgst.String())
}
Example #25
func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
	return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
}
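Putting the two key helpers (Examples #23 and #25) side by side, a small sketch of the key shapes they produce; the repository name and digest below are examples only:

package main

import "fmt"

func main() {
	repo := "library/ubuntu"
	dgst := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	// Global descriptor hash key (Example #23):
	fmt.Println("blobs::" + dgst)

	// Repository-scoped descriptor hash key (Example #25):
	fmt.Println("repository::" + repo + "::blobs::" + dgst)
}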