Example #1
// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()
	if err != nil {
		return "", err
	}

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return "", err
		}
		if _, err := verifier.Write(canonical); err != nil {
			return "", err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return "", err
		}
		return digested.Digest(), nil
	}

	return digest.FromBytes(canonical), nil
}
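
Every example on this page follows the same three-step pattern: create a verifier for a known digest with digest.NewDigestVerifier, write the content through it, and check Verified(). Below is a minimal, self-contained sketch of that pattern, assuming the docker/distribution digest package these examples import (the sha256 literal is the well-known digest of "hello world"):

package main

import (
	"fmt"

	"github.com/docker/distribution/digest"
)

func main() {
	payload := []byte("hello world")

	// In practice the digest comes from a manifest, a registry header, or a
	// user-supplied reference; here it is simply the sha256 of the payload.
	dgst, err := digest.ParseDigest("sha256:b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9")
	if err != nil {
		panic(err)
	}

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		panic(err)
	}
	if _, err := verifier.Write(payload); err != nil {
		panic(err)
	}
	fmt.Println("verified:", verifier.Verified()) // prints: verified: true
}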
Example #2
// Verify reads the given reader into a temporary file and validates that
// it matches the digest. If it does, it returns a reader that allows access
// to the data. Otherwise, it returns an error.
// The caller is responsible for closing the returned reader, in order to
// ensure the temporary file is deleted.
func Verify(r io.Reader, d digest.Digest) (io.ReadCloser, error) {
	w, err := digest.NewDigestVerifier(d)
	if err != nil {
		return nil, err
	}

	tmp, err := ioutil.TempFile("", "unverified-layer")
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(io.MultiWriter(w, tmp), r)
	if err != nil {
		return nil, err
	}

	if !w.Verified() {
		return nil, errors.New("digest verification failed")
	}

	_, err = tmp.Seek(0, 0)
	if err != nil {
		return nil, err
	}

	return &deleteCloser{tmp}, nil
}
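
The deleteCloser returned above is not part of this example. A plausible sketch, assuming it simply wraps the *os.File so that Close also removes the temporary file:

// deleteCloser is a sketch of the wrapper referenced above: closing it closes
// the underlying temporary file and then deletes it from disk.
type deleteCloser struct {
	*os.File
}

func (dc *deleteCloser) Close() error {
	err := dc.File.Close()
	if rmErr := os.Remove(dc.File.Name()); err == nil {
		err = rmErr
	}
	return err
}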
Example #3
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier, err := digest.NewDigestVerifier(digested.Digest())
		if err != nil {
			return nil, err
		}
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())
			logrus.Error(err)
			return nil, err
		}
	}
	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String())
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	}
	return m, nil
}
Example #4
// ManifestMatchesImage returns true if the provided manifest matches the digest stored in the image name.
func ManifestMatchesImage(image *Image, newManifest []byte) (bool, error) {
	dgst, err := digest.ParseDigest(image.Name)
	if err != nil {
		return false, err
	}
	v, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return false, err
	}
	var canonical []byte

	switch image.DockerImageManifestMediaType {
	case schema2.MediaTypeManifest:
		var m schema2.DeserializedManifest
		if err := json.Unmarshal(newManifest, &m); err != nil {
			return false, err
		}
		_, canonical, err = m.Payload()
		if err != nil {
			return false, err
		}
	case schema1.MediaTypeManifest, "":
		var m schema1.SignedManifest
		if err := json.Unmarshal(newManifest, &m); err != nil {
			return false, err
		}
		canonical = m.Canonical
	default:
		return false, fmt.Errorf("unsupported manifest mediatype: %s", image.DockerImageManifestMediaType)
	}
	if _, err := v.Write(canonical); err != nil {
		return false, err
	}
	return v.Verified(), nil
}
Example #5
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, picker *picker.Picker) (RootCA, error) {
	// We need a valid picker to be able to Dial to a remote CA
	if picker == nil {
		return RootCA{}, fmt.Errorf("valid remote address picker required")
	}

	// This TLS Config is intentionally using InsecureSkipVerify. Either we're
	// doing TOFU, in which case we don't validate the remote CA, or we're using
	// a user supplied hash to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecureCreds),
		grpc.WithBackoffMaxDelay(10 * time.Second),
		grpc.WithPicker(picker)}

	firstAddr, err := picker.PickAddr()
	if err != nil {
		return RootCA{}, err
	}

	conn, err := grpc.Dial(firstAddr, opts...)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %v", err)
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())

		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, fmt.Errorf("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Pool: pool}, nil
}
Example #6
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q", di.digest)

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	digestStr := di.digest.String()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(digestStr),
		Action:    "Downloading",
	})
	io.Copy(di.tmpFile, reader)

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
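
The io.TeeReader construction above, which mirrors the downloaded bytes into the verifier while the copy writes them to their destination, recurs throughout these examples. Distilled into a standalone helper (copyVerified is a name invented here for illustration):

// copyVerified copies src to dst while feeding every byte through a digest
// verifier, and fails if the copied content does not match dgst.
func copyVerified(dst io.Writer, src io.Reader, dgst digest.Digest) error {
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(dst, io.TeeReader(src, verifier)); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("content does not match digest %s", dgst)
	}
	return nil
}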
Example #7
func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) {
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	return &verifiedReadCloser{
		rc:       rc,
		dgst:     dgst,
		verifier: verifier,
	}, nil
}
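
The verifiedReadCloser type itself is not shown in this example. A sketch of how such a wrapper is commonly implemented, checking the digest only once the underlying stream reports io.EOF:

type verifiedReadCloser struct {
	rc       io.ReadCloser
	dgst     digest.Digest
	verifier digest.Verifier
}

func (vrc *verifiedReadCloser) Read(p []byte) (int, error) {
	n, err := vrc.rc.Read(p)
	if n > 0 {
		// Mirror every byte handed to the caller into the verifier.
		if _, werr := vrc.verifier.Write(p[:n]); werr != nil {
			return n, werr
		}
	}
	// Only judge the digest once the stream is exhausted.
	if err == io.EOF && !vrc.verifier.Verified() {
		err = fmt.Errorf("digest verification failed for %s", vrc.dgst)
	}
	return n, err
}

func (vrc *verifiedReadCloser) Close() error {
	return vrc.rc.Close()
}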
Example #8
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, r remotes.Remotes) (RootCA, error) {
	// This TLS Config is intentionally using InsecureSkipVerify. We use the
	// digest instead to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	conn, peer, err := getGRPCConnection(insecureCreds, r)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	defer func() {
		if err != nil {
			r.Observe(peer, -remotes.DefaultObservationWeight)
			return
		}
		r.Observe(peer, remotes.DefaultObservationWeight)
	}()
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier")
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())

		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, errors.New("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil
}
Example #9
// Finally Push the (signed) manifest of the blobs we've just pushed
func (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, signedManifest, rawManifest []byte, auth *RequestAuthorization) (digest.Digest, error) {
	routeURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)
	if err != nil {
		return "", err
	}

	method := "PUT"
	logrus.Debugf("[registry] Calling %q %s", method, routeURL)
	req, err := http.NewRequest(method, routeURL, bytes.NewReader(signedManifest))
	if err != nil {
		return "", err
	}
	if err := auth.Authorize(req); err != nil {
		return "", err
	}
	res, err := r.client.Do(req)
	if err != nil {
		return "", err
	}
	defer res.Body.Close()

	// All 2xx and 3xx responses can be accepted for a put.
	if res.StatusCode >= 400 {
		if res.StatusCode == 401 {
			return "", errLoginRequired
		}
		errBody, err := ioutil.ReadAll(res.Body)
		if err != nil {
			return "", err
		}
		logrus.Debugf("Unexpected response from server: %q %#v", errBody, res.Header)
		return "", httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to push %s:%s manifest", res.StatusCode, imageName, tagName), res)
	}

	hdrDigest, err := digest.ParseDigest(res.Header.Get(DockerDigestHeader))
	if err != nil {
		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
	}

	dgstVerifier, err := digest.NewDigestVerifier(hdrDigest)
	if err != nil {
		return "", fmt.Errorf("invalid manifest digest from registry: %s", err)
	}

	dgstVerifier.Write(rawManifest)

	if !dgstVerifier.Verified() {
		computedDigest, _ := digest.FromBytes(rawManifest)
		return "", fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", hdrDigest, computedDigest)
	}

	return hdrDigest, nil
}
Example #10
func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
		verifier, err := digest.NewDigestVerifier(manifestDigest)
		if err != nil {
			return false, err
		}
		payload, err := m.Payload()
		if err != nil {
			return false, err
		}
		if _, err := verifier.Write(payload); err != nil {
			return false, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
			logrus.Error(err)
			return false, err
		}
	}

	// TODO(tiborvass): what's the use case for having manifest == nil and err == nil? Shouldn't the error be "DoesNotExist"?
	if m == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
	}
	if m.SchemaVersion != 1 {
		return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
	}
	if len(m.FSLayers) != len(m.History) {
		return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
	}
	if len(m.FSLayers) == 0 {
		return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
	}
	keys, err := manifest.Verify(m)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err)
	}
	verified, err = p.verifyTrustedKeys(m.Name, keys)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest keys: %v", err)
	}
	return verified, nil
}
Example #11
func verifyManifest(signedManifest *schema1.SignedManifest, tag string) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if manifestDigest, err := digest.ParseDigest(tag); err == nil {
		verifier, err := digest.NewDigestVerifier(manifestDigest)
		if err != nil {
			return nil, err
		}
		payload, err := signedManifest.Payload()
		if err != nil {
			// If this failed, the signatures section was corrupted
			// or missing. Treat the entire manifest as the payload.
			payload = signedManifest.Raw
		}
		if _, err := verifier.Write(payload); err != nil {
			return nil, err
		}
		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", manifestDigest)
			logrus.Error(err)
			return nil, err
		}

		var verifiedManifest schema1.Manifest
		if err = json.Unmarshal(payload, &verifiedManifest); err != nil {
			return nil, err
		}
		m = &verifiedManifest
	} else {
		m = &signedManifest.Manifest
	}

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
	}
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
	}
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
	}
	return m, nil
}
Example #12
// ManifestMatchesImage returns true if the provided manifest matches the digest stored in the image name.
func ManifestMatchesImage(image *Image, newManifest []byte) (bool, error) {
	dgst, err := digest.ParseDigest(image.Name)
	if err != nil {
		return false, err
	}
	v, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return false, err
	}
	sm := schema1.SignedManifest{Raw: newManifest}
	raw, err := sm.Payload()
	if err != nil {
		return false, err
	}
	if _, err := v.Write(raw); err != nil {
		return false, err
	}
	return v.Verified(), nil
}
Example #13
// ImageConfigMatchesImage returns true if the provided image config matches a digest
// stored in the manifest of the image.
func ImageConfigMatchesImage(image *Image, imageConfig []byte) (bool, error) {
	if image.DockerImageManifestMediaType != schema2.MediaTypeManifest {
		return false, nil
	}

	var m schema2.DeserializedManifest
	if err := json.Unmarshal([]byte(image.DockerImageManifest), &m); err != nil {
		return false, err
	}

	v, err := digest.NewDigestVerifier(m.Config.Digest)
	if err != nil {
		return false, err
	}

	if _, err := v.Write(imageConfig); err != nil {
		return false, err
	}

	return v.Verified(), nil
}
Example #14
func (p *v2Puller) validateManifest(m *manifest.SignedManifest, tag string) (verified bool, err error) {
	// TODO(tiborvass): what's the use case for having manifest == nil and err == nil? Shouldn't the error be "DoesNotExist"?
	if m == nil {
		return false, fmt.Errorf("image manifest does not exist for tag %q", tag)
	}
	if m.SchemaVersion != 1 {
		return false, fmt.Errorf("unsupported schema version %d for tag %q", m.SchemaVersion, tag)
	}
	if len(m.FSLayers) != len(m.History) {
		return false, fmt.Errorf("length of history not equal to number of layers for tag %q", tag)
	}
	if len(m.FSLayers) == 0 {
		return false, fmt.Errorf("no FSLayers in manifest for tag %q", tag)
	}
	keys, err := manifest.Verify(m)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest for tag %q: %v", tag, err)
	}
	verified, err = p.verifyTrustedKeys(m.Name, keys)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest keys: %v", err)
	}
	localDigest, err := digest.ParseDigest(tag)
	// if pull by digest, then verify
	if err == nil {
		verifier, err := digest.NewDigestVerifier(localDigest)
		if err != nil {
			return false, err
		}
		payload, err := m.Payload()
		if err != nil {
			return false, err
		}
		if _, err := verifier.Write(payload); err != nil {
			return false, err
		}
		verified = verified && verifier.Verified()
	}
	return verified, nil
}
Example #15
func TestSimpleRead(t *testing.T) {
	ctx := context.Background()
	content := make([]byte, 1<<20)
	n, err := mrand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}

	if n != len(content) {
		t.Fatalf("random read didn't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"

	if err := driver.PutContent(ctx, path, content); err != nil {
		t.Fatalf("error putting patterned content: %v", err)
	}

	fr, err := newFileReader(ctx, driver, path, int64(len(content)))
	if err != nil {
		t.Fatalf("error allocating file reader: %v", err)
	}

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		t.Fatalf("error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify read data")
	}
}
Example #16
// verifyDigest checks the contents of p against the provided digest. Note
// that for manifests, this is the signed payload and not the raw bytes with
// signatures.
func verifyDigest(dgst digest.Digest, p []byte) error {
	if err := dgst.Validate(); err != nil {
		return fmt.Errorf("error validating  digest %q: %v", dgst, err)
	}

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		// There are not many ways this can go wrong: if it does, it's
		// fatal. Likely, the cause would be poor validation of the
		// incoming reference.
		return fmt.Errorf("error creating verifier for digest %q: %v", dgst, err)
	}

	if _, err := verifier.Write(p); err != nil {
		return fmt.Errorf("error writing payload to digest verifier (verifier target %q): %v", dgst, err)
	}

	if !verifier.Verified() {
		return fmt.Errorf("verification against digest %q failed", dgst)
	}

	return nil
}
Example #17
func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)
	if err != nil {
		return nil, err
	}

	// Verify image config digest
	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		return nil, err
	}
	if _, err := verifier.Write(configJSON); err != nil {
		return nil, err
	}
	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)
		logrus.Error(err)
		return nil, err
	}

	return configJSON, nil
}
Example #18
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if desc.Digest == "" {
		// if no descriptors are provided, we have nothing to validate
		// against. We don't really want to support this for the registry.
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Reason: fmt.Errorf("cannot validate against empty digest"),
		}
	}

	// Stat the on disk file
	if fi, err := bw.fileWriter.driver.Stat(ctx, bw.path); err != nil {
		switch err := err.(type) {
		case storagedriver.PathNotFoundError:
			// NOTE(stevvooe): We really don't care if the file is
			// not actually present for the reader. We now assume
			// that the desc length is zero.
			desc.Size = 0
		default:
			// Any other error we want propagated up the stack.
			return distribution.Descriptor{}, err
		}
	} else {
		if fi.IsDir() {
			return distribution.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
		}

		bw.size = fi.Size()
	}

	if desc.Size > 0 {
		if desc.Size != bw.size {
			return distribution.Descriptor{}, distribution.ErrBlobInvalidLength
		}
	} else {
		// if provided 0 or negative length, we can assume caller doesn't know or
		// care about length.
		desc.Size = bw.size
	}

	// TODO(stevvooe): This section is very meandering. Need to be broken down
	// to be a lot more clear.

	if err := bw.resumeDigestAt(ctx, bw.size); err == nil {
		canonical = bw.digester.Digest()

		if canonical.Algorithm() == desc.Digest.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = desc.Digest == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else if err == errResumableDigestNotAvailable {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	} else {
		return distribution.Descriptor{}, err
	}

	if fullHash {
		// a fantastic optimization: if the written data and the size are
		// the same, we don't need to read the data from the backend. This is
		// because we've written the entire file in the lifecycle of the
		// current instance.
		if bw.written == bw.size && digest.Canonical == desc.Digest.Algorithm() {
			canonical = bw.digester.Digest()
			verified = desc.Digest == canonical
		}

		// If the check based on size fails, we fall back to the slowest of
		// paths. We may be able to make the size-based check a stronger
		// guarantee, so this may be defensive.
		if !verified {
			digester := digest.Canonical.New()

			digestVerifier, err := digest.NewDigestVerifier(desc.Digest)
			if err != nil {
				return distribution.Descriptor{}, err
			}

			// Read the file from the backend driver and validate it.
			fr, err := newFileReader(ctx, bw.fileWriter.driver, bw.path, desc.Size)
			if err != nil {
				return distribution.Descriptor{}, err
			}
			defer fr.Close()

			tr := io.TeeReader(fr, digester.Hash())

			if _, err := io.Copy(digestVerifier, tr); err != nil {
				return distribution.Descriptor{}, err
			}

			canonical = digester.Digest()
			verified = digestVerifier.Verified()
		}
	}

	if !verified {
		context.GetLoggerWithFields(ctx,
			map[interface{}]interface{}{
				"canonical": canonical,
				"provided":  desc.Digest,
			}, "canonical", "provided").
			Errorf("canonical digest does match provided digest")
		return distribution.Descriptor{}, distribution.ErrBlobInvalidDigest{
			Digest: desc.Digest,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	// update desc with canonical hash
	desc.Digest = canonical

	if desc.MediaType == "" {
		desc.MediaType = "application/octet-stream"
	}

	return desc, nil
}
Example #19
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, r remotes.Remotes) (RootCA, error) {
	// This TLS Config is intentionally using InsecureSkipVerify. Either we're
	// doing TOFU, in which case we don't validate the remote CA, or we're using
	// a user supplied hash to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecureCreds),
		grpc.WithTimeout(5 * time.Second),
		grpc.WithBackoffMaxDelay(5 * time.Second),
	}

	peer, err := r.Select()
	if err != nil {
		return RootCA{}, err
	}

	conn, err := grpc.Dial(peer.Addr, opts...)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	defer func() {
		if err != nil {
			r.Observe(peer, -remotes.DefaultObservationWeight)
			return
		}
		r.Observe(peer, remotes.DefaultObservationWeight)
	}()
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier")
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())

		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, errors.New("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil
}
Example #20
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)

	out := p.config.OutStream

	if c, err := p.poolAdd("pull", "img:"+di.img.ID); err != nil {
		if c != nil {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Layer already being pulled by another client. Waiting.", nil))
			<-c
			out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
		} else {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", di.img.ID, err)
		}
		di.err <- nil
		return
	}

	defer p.poolRemove("pull", "img:"+di.img.ID)
	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		di.err <- err
		return
	}

	blobs := p.repo.Blobs(nil)

	desc, err := blobs.Stat(nil, di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(nil, di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       out,
		Formatter: p.sf,
		Size:      int(di.size),
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.ID),
		Action:    "Downloading",
	})
	io.Copy(tmpFile, reader)

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
	di.tmpFile = tmpFile
	di.layer = layerDownload

	di.err <- nil
}
Example #21
// loadManifest loads a manifest from a byte array and verifies its content.
// The signature must be verified or an error is returned. If the manifest
// contains no signatures by a trusted key for the name in the manifest, the
// image is not considered verified. The parsed manifest object and a boolean
// indicating whether the manifest is verified are returned.
func (s *TagStore) loadManifest(eng *engine.Engine, manifestBytes []byte, dgst, ref string) (*registry.ManifestData, bool, error) {
	sig, err := libtrust.ParsePrettySignature(manifestBytes, "signatures")
	if err != nil {
		return nil, false, fmt.Errorf("error parsing payload: %s", err)
	}

	keys, err := sig.Verify()
	if err != nil {
		return nil, false, fmt.Errorf("error verifying payload: %s", err)
	}

	payload, err := sig.Payload()
	if err != nil {
		return nil, false, fmt.Errorf("error retrieving payload: %s", err)
	}

	var manifestDigest digest.Digest

	if dgst != "" {
		manifestDigest, err = digest.ParseDigest(dgst)
		if err != nil {
			return nil, false, fmt.Errorf("invalid manifest digest from registry: %s", err)
		}

		dgstVerifier, err := digest.NewDigestVerifier(manifestDigest)
		if err != nil {
			return nil, false, fmt.Errorf("unable to verify manifest digest from registry: %s", err)
		}

		dgstVerifier.Write(payload)

		if !dgstVerifier.Verified() {
			computedDigest, _ := digest.FromBytes(payload)
			return nil, false, fmt.Errorf("unable to verify manifest digest: registry has %q, computed %q", manifestDigest, computedDigest)
		}
	}

	if utils.DigestReference(ref) && ref != manifestDigest.String() {
		return nil, false, fmt.Errorf("mismatching image manifest digest: got %q, expected %q", manifestDigest, ref)
	}

	var manifest registry.ManifestData
	if err := json.Unmarshal(payload, &manifest); err != nil {
		return nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
	}
	if manifest.SchemaVersion != 1 {
		return nil, false, fmt.Errorf("unsupported schema version: %d", manifest.SchemaVersion)
	}

	var verified bool
	for _, key := range keys {
		job := eng.Job("trust_key_check")
		b, err := key.MarshalJSON()
		if err != nil {
			return nil, false, fmt.Errorf("error marshalling public key: %s", err)
		}
		namespace := manifest.Name
		if namespace[0] != '/' {
			namespace = "/" + namespace
		}
		stdoutBuffer := bytes.NewBuffer(nil)

		job.Args = append(job.Args, namespace)
		job.Setenv("PublicKey", string(b))
		// Check key has read/write permission (0x03)
		job.SetenvInt("Permission", 0x03)
		job.Stdout.Add(stdoutBuffer)
		if err = job.Run(); err != nil {
			return nil, false, fmt.Errorf("error running key check: %s", err)
		}
		result := engine.Tail(stdoutBuffer, 1)
		logrus.Debugf("Key check result: %q", result)
		if result == "verified" {
			verified = true
		}
	}

	return &manifest, verified, nil
}
Example #22
// TestSimpleWrite takes the fileWriter through common write operations
// ensuring data integrity.
func TestSimpleWrite(t *testing.T) {
	content := make([]byte, 1<<20)
	n, err := rand.Read(content)
	if err != nil {
		t.Fatalf("unexpected error building random data: %v", err)
	}

	if n != len(content) {
		t.Fatalf("random read did't fill buffer")
	}

	dgst, err := digest.FromReader(bytes.NewReader(content))
	if err != nil {
		t.Fatalf("unexpected error digesting random content: %v", err)
	}

	driver := inmemory.New()
	path := "/random"

	fw, err := newFileWriter(driver, path)
	if err != nil {
		t.Fatalf("unexpected error creating fileWriter: %v", err)
	}
	defer fw.Close()

	n, err = fw.Write(content)
	if err != nil {
		t.Fatalf("unexpected error writing content: %v", err)
	}
	fw.Flush()

	if n != len(content) {
		t.Fatalf("unexpected write length: %d != %d", n, len(content))
	}

	fr, err := newFileReader(driver, path)
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err := digest.NewDigestVerifier(dgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}

	// Check the seek position is equal to the content length
	end, err := fw.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("unexpected error seeking: %v", err)
	}

	if end != int64(len(content)) {
		t.Fatalf("write did not advance offset: %d != %d", end, len(content))
	}

	// Double the content, but use the WriteAt method
	doubled := append(content, content...)
	doubledgst, err := digest.FromReader(bytes.NewReader(doubled))
	if err != nil {
		t.Fatalf("unexpected error digesting doubled content: %v", err)
	}

	n, err = fw.WriteAt(content, end)
	if err != nil {
		t.Fatalf("unexpected error writing content at %d: %v", end, err)
	}

	if n != len(content) {
		t.Fatalf("writeat was short: %d != %d", n, len(content))
	}

	fr, err = newFileReader(driver, path)
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err = digest.NewDigestVerifier(doubledgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}

	// Check that WriteAt didn't update the offset.
	end, err = fw.Seek(0, os.SEEK_END)
	if err != nil {
		t.Fatalf("unexpected error seeking: %v", err)
	}

	if end != int64(len(content)) {
		t.Fatalf("write did not advance offset: %d != %d", end, len(content))
	}

	// Now, we copy from one path to another, running the data through the
	// fileReader to fileWriter, rather than the driver.Move command to ensure
	// everything is working correctly.
	fr, err = newFileReader(driver, path)
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	fw, err = newFileWriter(driver, "/copied")
	if err != nil {
		t.Fatalf("unexpected error creating fileWriter: %v", err)
	}
	defer fw.Close()

	nn, err := io.Copy(fw, fr)
	if err != nil {
		t.Fatalf("unexpected error copying data: %v", err)
	}

	if nn != int64(len(doubled)) {
		t.Fatalf("unexpected copy length: %d != %d", nn, len(doubled))
	}

	fr, err = newFileReader(driver, "/copied")
	if err != nil {
		t.Fatalf("unexpected error creating fileReader: %v", err)
	}
	defer fr.Close()

	verifier, err = digest.NewDigestVerifier(doubledgst)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}

	io.Copy(verifier, fr)

	if !verifier.Verified() {
		t.Fatalf("unable to verify write data")
	}
}
Example #23
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q", di.digest)

	blobs := p.repo.Blobs(context.Background())

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	di.size, err = layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		di.size = 0
	} else {
		// Restore the seek offset at the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			di.err <- err
			return
		}
	}

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	digestStr := di.digest.String()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       di.broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(digestStr),
		Action:    "Downloading",
	})
	io.Copy(di.tmpFile, reader)

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	di.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(digestStr), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", digestStr, di.tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
Example #24
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	blobs := ld.repo.Blobs(ctx)

	layerDownload, err := blobs.Open(ctx, ld.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		// Restore the seek offset at the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
	defer reader.Close()

	verifier, err := digest.NewDigestVerifier(ld.digest)
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
	if err != nil {
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)
		tmpFile.Close()
		if err := os.RemoveAll(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}

		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	tmpFile.Seek(0, 0)
	return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
}
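
The tmpFileCloser helper passed to NewReadCloserWrapper above is not shown. Example #28 inlines an equivalent closure; pulled out as a helper, it would look roughly like this:

// tmpFileCloser returns a close function that also removes the temporary
// file, mirroring the inline closure used in Example #28.
func tmpFileCloser(tmpFile *os.File) func() error {
	return func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}
}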
Example #25
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)

	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return false, err
	}

	if verified {
		log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        common.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
					verified = false
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        common.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if manifestDigest != "" {
		out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	if utils.DigestReference(tag) {
		if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
Example #26
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if verified {
		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	// downloadInfo is used to pass information from download to extractor
	type downloadInfo struct {
		imgJSON    []byte
		img        *image.Image
		digest     digest.Digest
		tmpFile    *os.File
		length     int64
		downloaded bool
		err        chan error
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        stringid.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					return fmt.Errorf("image layer digest verification failed for %q", di.digest)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

				logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		downloads[i].err = make(chan error)
		go func(di *downloadInfo) {
			di.err <- downloadFunc(di)
		}(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        stringid.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				if err := s.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if localDigest != remoteDigest { // this is not a verification check.
		// NOTE(stevvooe): This is a very defensive branch and should never
		// happen, since all manifest digest implementations use the same
		// algorithm.
		logrus.WithFields(
			logrus.Fields{
				"local":  localDigest,
				"remote": remoteDigest,
			}).Debugf("local digest does not match remote")

		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
	}

	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))

	if tag == localDigest.String() {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.

		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
			return false, err
		}
	}

	if !utils.DigestReference(tag) {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
Example #27
func (p *v2Puller) download(di *downloadInfo) {
	logrus.Debugf("pulling blob %q to %s", di.digest, di.img.ID)

	out := di.out

	broadcaster, found := p.poolAdd("pull", "img:"+di.img.ID)
	if found {
		broadcaster.Add(out)
		broadcaster.Wait()
		out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
		di.err <- nil
		return
	}

	broadcaster.Add(out)
	defer p.poolRemove("pull", "img:"+di.img.ID)
	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		di.err <- err
		return
	}
	di.tmpFile = tmpFile

	blobs := p.repo.Blobs(context.Background())

	desc, err := blobs.Stat(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		di.err <- err
		return
	}
	di.size = desc.Size

	layerDownload, err := blobs.Open(context.Background(), di.digest)
	if err != nil {
		logrus.Debugf("Error fetching layer: %v", err)
		di.err <- err
		return
	}
	defer layerDownload.Close()

	verifier, err := digest.NewDigestVerifier(di.digest)
	if err != nil {
		di.err <- err
		return
	}

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(io.TeeReader(layerDownload, verifier)),
		Out:       broadcaster,
		Formatter: p.sf,
		Size:      di.size,
		NewLines:  false,
		ID:        stringid.TruncateID(di.img.ID),
		Action:    "Downloading",
	})
	io.Copy(tmpFile, reader)

	broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Verifying Checksum", nil))

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", di.digest)
		logrus.Error(err)
		di.err <- err
		return
	}

	broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))

	logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
	di.layer = layerDownload

	di.err <- nil
}
Example #28
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
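
The createDownloadFile helper is not shown here. Given that Example #24 creates its temporary file with ioutil.TempFile("", "GetImageBlob"), a minimal version is presumably just:

// createDownloadFile opens a fresh temporary file for the layer download;
// a guess based on the equivalent call in Example #24.
func createDownloadFile() (*os.File, error) {
	return ioutil.TempFile("", "GetImageBlob")
}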
Example #29
// validateLayer checks the layer data against the digest, returning an error
// if it does not match. The canonical digest is returned.
func (lw *layerWriter) validateLayer(dgst digest.Digest) (digest.Digest, error) {
	var (
		verified, fullHash bool
		canonical          digest.Digest
	)

	if lw.resumableDigester != nil {
		// Restore the hasher state to the end of the upload.
		if err := lw.resumeHashAt(lw.size); err != nil {
			return "", err
		}

		canonical = lw.resumableDigester.Digest()

		if canonical.Algorithm() == dgst.Algorithm() {
			// Common case: client and server prefer the same canonical digest
			// algorithm - currently SHA256.
			verified = dgst == canonical
		} else {
			// The client wants to use a different digest algorithm. They'll just
			// have to be patient and wait for us to download and re-hash the
			// uploaded content using that digest algorithm.
			fullHash = true
		}
	} else {
		// Not using resumable digests, so we need to hash the entire layer.
		fullHash = true
	}

	if fullHash {
		digester := digest.NewCanonicalDigester()

		digestVerifier, err := digest.NewDigestVerifier(dgst)
		if err != nil {
			return "", err
		}

		// Read the file from the backend driver and validate it.
		fr, err := newFileReader(lw.layerStore.repository.ctx, lw.bufferedFileWriter.driver, lw.path)
		if err != nil {
			return "", err
		}

		tr := io.TeeReader(fr, digester)

		if _, err = io.Copy(digestVerifier, tr); err != nil {
			return "", err
		}

		canonical = digester.Digest()
		verified = digestVerifier.Verified()
	}

	if !verified {
		context.GetLoggerWithField(lw.layerStore.repository.ctx, "canonical", dgst).
			Errorf("canonical digest does not match provided digest")
		return "", distribution.ErrLayerInvalidDigest{
			Digest: dgst,
			Reason: fmt.Errorf("content does not match digest"),
		}
	}

	return canonical, nil
}
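The fullHash branch above drives two hash consumers off a single read: io.TeeReader mirrors every byte into the canonical digester while io.Copy pushes the stream into the verifier for the client-supplied digest. A standalone sketch of that tee pattern, assuming the same era's NewCanonicalDigester API; rehashAndVerify is a hypothetical name and r stands in for the backend file reader.

package verifysketch

import (
	"fmt"
	"io"

	"github.com/docker/distribution/digest"
)

// rehashAndVerify re-hashes r with the server's canonical algorithm while
// simultaneously verifying it against a client-supplied digest, mirroring
// the fullHash path above. Sketch only, not the library's exported API.
func rehashAndVerify(r io.Reader, clientDgst digest.Digest) (digest.Digest, error) {
	digester := digest.NewCanonicalDigester()

	verifier, err := digest.NewDigestVerifier(clientDgst)
	if err != nil {
		return "", err
	}

	// The tee writes everything the verifier consumes into the digester,
	// so a single pass over the content feeds both hashes.
	if _, err := io.Copy(verifier, io.TeeReader(r, digester)); err != nil {
		return "", err
	}

	if !verifier.Verified() {
		return "", fmt.Errorf("content does not match digest %s", clientDgst)
	}
	return digester.Digest(), nil
}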
Exemplo n.º 30
// TestBlobAPI conducts a full test of the blob API.
func TestBlobAPI(t *testing.T) {
	// TODO(stevvooe): This test code is complete junk but it should cover the
	// complete flow. This must be broken down and checked against the
	// specification *before* we submit the final to docker core.
	env := newTestEnv(t)

	imageName := "foo/bar"
	// "build" our layer file
	layerFile, tarSumStr, err := testutil.CreateRandomTarFile()
	if err != nil {
		t.Fatalf("error creating random layer file: %v", err)
	}

	layerDigest := digest.Digest(tarSumStr)

	// -----------------------------------
	// Test fetch for non-existent content
	layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest)
	if err != nil {
		t.Fatalf("error building url: %v", err)
	}

	resp, err := http.Get(layerURL)
	if err != nil {
		t.Fatalf("unexpected error fetching non-existent layer: %v", err)
	}

	checkResponse(t, "fetching non-existent content", resp, http.StatusNotFound)

	// ------------------------------------------
	// Test head request for non-existent content
	resp, err = http.Head(layerURL)
	if err != nil {
		t.Fatalf("unexpected error checking head on non-existent layer: %v", err)
	}

	checkResponse(t, "checking head on non-existent layer", resp, http.StatusNotFound)

	// ------------------------------------------
	// Start an upload, check the status then cancel
	uploadURLBase, uploadUUID := startPushLayer(t, env.builder, imageName)

	// A status check should work
	resp, err = http.Get(uploadURLBase)
	if err != nil {
		t.Fatalf("unexpected error getting upload status: %v", err)
	}
	checkResponse(t, "status of deleted upload", resp, http.StatusNoContent)
	checkHeaders(t, resp, http.Header{
		"Location":           []string{"*"},
		"Range":              []string{"0-0"},
		"Docker-Upload-UUID": []string{uploadUUID},
	})

	req, err := http.NewRequest("DELETE", uploadURLBase, nil)
	if err != nil {
		t.Fatalf("unexpected error creating delete request: %v", err)
	}

	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("unexpected error sending delete request: %v", err)
	}

	checkResponse(t, "deleting upload", resp, http.StatusNoContent)

	// A status check should result in 404
	resp, err = http.Get(uploadURLBase)
	if err != nil {
		t.Fatalf("unexpected error getting upload status: %v", err)
	}
	checkResponse(t, "status of deleted upload", resp, http.StatusNotFound)

	// -----------------------------------------
	// Do layer push with an empty body and different digest
	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
	resp, err = doPushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, bytes.NewReader([]byte{}))
	if err != nil {
		t.Fatalf("unexpected error doing bad layer push: %v", err)
	}

	checkResponse(t, "bad layer push", resp, http.StatusBadRequest)
	checkBodyHasErrorCodes(t, "bad layer push", resp, v2.ErrorCodeDigestInvalid)

	// -----------------------------------------
	// Do layer push with an empty body and correct digest
	zeroDigest, err := digest.FromTarArchive(bytes.NewReader([]byte{}))
	if err != nil {
		t.Fatalf("unexpected error digesting empty buffer: %v", err)
	}

	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
	pushLayer(t, env.builder, imageName, zeroDigest, uploadURLBase, bytes.NewReader([]byte{}))

	// -----------------------------------------
	// Do layer push with an empty tar archive and correct digest

	// This is a valid but empty tarfile!
	emptyTar := bytes.Repeat([]byte("\x00"), 1024)
	emptyDigest, err := digest.FromTarArchive(bytes.NewReader(emptyTar))
	if err != nil {
		t.Fatalf("unexpected error digesting empty tar: %v", err)
	}

	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
	pushLayer(t, env.builder, imageName, emptyDigest, uploadURLBase, bytes.NewReader(emptyTar))

	// ------------------------------------------
	// Now, actually do successful upload.
	layerLength, _ := layerFile.Seek(0, os.SEEK_END)
	layerFile.Seek(0, os.SEEK_SET)

	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
	pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile)

	// ------------------------------------------
	// Now, push just a chunk
	layerFile.Seek(0, os.SEEK_SET)

	canonicalDigester := digest.Canonical.New()
	if _, err := io.Copy(canonicalDigester.Hash(), layerFile); err != nil {
		t.Fatalf("error copying to digest: %v", err)
	}
	canonicalDigest := canonicalDigester.Digest()

	layerFile.Seek(0, os.SEEK_SET)
	uploadURLBase, uploadUUID = startPushLayer(t, env.builder, imageName)
	uploadURLBase, dgst := pushChunk(t, env.builder, imageName, uploadURLBase, layerFile, layerLength)
	finishUpload(t, env.builder, imageName, uploadURLBase, dgst)

	// ------------------------
	// Use a head request to see if the layer exists.
	resp, err = http.Head(layerURL)
	if err != nil {
		t.Fatalf("unexpected error checking head on existing layer: %v", err)
	}

	checkResponse(t, "checking head on existing layer", resp, http.StatusOK)
	checkHeaders(t, resp, http.Header{
		"Content-Length":        []string{fmt.Sprint(layerLength)},
		"Docker-Content-Digest": []string{canonicalDigest.String()},
	})

	// ----------------
	// Fetch the layer!
	resp, err = http.Get(layerURL)
	if err != nil {
		t.Fatalf("unexpected error fetching layer: %v", err)
	}

	checkResponse(t, "fetching layer", resp, http.StatusOK)
	checkHeaders(t, resp, http.Header{
		"Content-Length":        []string{fmt.Sprint(layerLength)},
		"Docker-Content-Digest": []string{canonicalDigest.String()},
	})

	// Verify the body
	verifier, err := digest.NewDigestVerifier(layerDigest)
	if err != nil {
		t.Fatalf("unexpected error getting digest verifier: %s", err)
	}
	if _, err := io.Copy(verifier, resp.Body); err != nil {
		t.Fatalf("unexpected error reading response body: %v", err)
	}

	if !verifier.Verified() {
		t.Fatalf("response body did not pass verification")
	}

	// ----------------
	// Fetch the layer with an invalid digest
	badURL := strings.Replace(layerURL, "tarsum", "trsum", 1)
	resp, err = http.Get(badURL)
	if err != nil {
		t.Fatalf("unexpected error fetching layer: %v", err)
	}

	checkResponse(t, "fetching layer bad digest", resp, http.StatusBadRequest)

	// Cache headers
	resp, err = http.Get(layerURL)
	if err != nil {
		t.Fatalf("unexpected error fetching layer: %v", err)
	}

	checkResponse(t, "fetching layer", resp, http.StatusOK)
	checkHeaders(t, resp, http.Header{
		"Content-Length":        []string{fmt.Sprint(layerLength)},
		"Docker-Content-Digest": []string{canonicalDigest.String()},
		"ETag":                  []string{canonicalDigest.String()},
		"Cache-Control":         []string{"max-age=31536000"},
	})

	// Matching etag, gives 304
	etag := resp.Header.Get("Etag")
	req, err = http.NewRequest("GET", layerURL, nil)
	if err != nil {
		t.Fatalf("Error constructing request: %s", err)
	}
	req.Header.Set("If-None-Match", etag)
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("Error constructing request: %s", err)
	}

	checkResponse(t, "fetching layer with etag", resp, http.StatusNotModified)

	// Non-matching etag, gives 200
	req, err = http.NewRequest("GET", layerURL, nil)
	if err != nil {
		t.Fatalf("Error constructing request: %s", err)
	}
	req.Header.Set("If-None-Match", "")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		t.Fatalf("Error sending request: %s", err)
	}
	checkResponse(t, "fetching layer with invalid etag", resp, http.StatusOK)

	// Missing tests:
	// 	- Upload the same tarsum file under a different repository and
	//	  ensure the content remains uncorrupted.
}
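The inline body check in the test above generalizes into a small client-side helper: fetch the blob, stream the response body through a verifier, and reject anything that does not hash to the expected digest. A hedged sketch; fetchAndVerifyBlob is a hypothetical name and the URL is whatever the caller built for the blob's location.

package verifysketch

import (
	"fmt"
	"io"
	"net/http"

	"github.com/docker/distribution/digest"
)

// fetchAndVerifyBlob GETs a blob URL and verifies the response body against
// the expected digest, as TestBlobAPI does inline. Hypothetical helper.
func fetchAndVerifyBlob(blobURL string, expected digest.Digest) error {
	resp, err := http.Get(blobURL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status fetching blob: %v", resp.Status)
	}

	verifier, err := digest.NewDigestVerifier(expected)
	if err != nil {
		return err
	}

	if _, err := io.Copy(verifier, resp.Body); err != nil {
		return err
	}

	if !verifier.Verified() {
		return fmt.Errorf("fetched blob does not match digest %s", expected)
	}
	return nil
}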