// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, picker *picker.Picker) (RootCA, error) {
	// We need a valid picker to be able to Dial to a remote CA
	if picker == nil {
		return RootCA{}, fmt.Errorf("valid remote address picker required")
	}

	// This TLS Config is intentionally using InsecureSkipVerify. Either we're
	// doing TOFU, in which case we don't validate the remote CA, or we're using
	// a user supplied hash to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecureCreds),
		grpc.WithBackoffMaxDelay(10 * time.Second),
		grpc.WithPicker(picker)}

	firstAddr, err := picker.PickAddr()
	if err != nil {
		return RootCA{}, err
	}

	conn, err := grpc.Dial(firstAddr, opts...)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %v", err)
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, fmt.Errorf("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Pool: pool}, nil
}
func hex(d digest.Digest) string {
	if d == "" {
		return ""
	}
	return d.Hex()
}
func (b *basicBlobStore) Size(dgst digest.Digest) (int64, error) {
	stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
	if err != nil {
		return 0, err
	}
	return stat.Size(), nil
}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
//	<algorithm>/<hex digest>
//
// Most importantly, for tarsum, the layout looks like this:
//
//	tarsum/<version>/<digest algorithm>/<full digest>
//
// If multilevel is true, the first two bytes of the digest will separate
// groups of digest folder. It will be as follows:
//
//	<algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(dgst.Algorithm())
	hex := dgst.Hex()
	prefix := []string{algorithm}

	var suffix []string

	if multilevel {
		suffix = append(suffix, hex[:2])
	}

	suffix = append(suffix, hex)

	if tsi, err := digest.ParseTarSum(dgst.String()); err == nil {
		// We have a tarsum!
		version := tsi.Version
		if version == "" {
			version = "v0"
		}

		prefix = []string{
			"tarsum",
			version,
			tsi.Algorithm,
		}
	}

	return append(prefix, suffix...), nil
}
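A minimal usage sketch of the path layout described in the comment above. The sha256 digest is a placeholder value, and the helper is assumed to live in the same package as digestPathComponents (with fmt and path/filepath imported); it is an illustration, not part of the original code.

// Illustration only: assumes digestPathComponents from above is in scope,
// plus the fmt, path/filepath, and digest imports of the surrounding file.
func exampleDigestPathComponents() {
	dgst := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

	// Generic digest, multilevel=false: ["sha256", "a3ed...46d4"]
	components, err := digestPathComponents(dgst, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(filepath.Join(components...)) // sha256/a3ed...46d4

	// multilevel=true adds a two-character fan-out directory:
	// ["sha256", "a3", "a3ed...46d4"]
	components, _ = digestPathComponents(dgst, true)
	fmt.Println(filepath.Join(components...)) // sha256/a3/a3ed...46d4
}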
func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string {
	// Split the digest into its algorithm and hex components.
	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()

	// The path to the target blob data looks something like:
	//   baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"
	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex)
}
func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) {
	img, err := s.is.Get(id)
	if err != nil {
		return nil, err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return nil, fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	var foreignSrcs map[layer.DiffID]distribution.Descriptor
	for i := range img.RootFS.DiffIDs {
		v1Img := image.V1Image{
			Created: img.Created,
		}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return nil, err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created)
		if err != nil {
			return nil, err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
		if src.Digest != "" {
			if foreignSrcs == nil {
				foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
			}
			foreignSrcs[img.RootFS.DiffIDs[i]] = src
		}
	}

	configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return nil, err
	}
	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
		return nil, err
	}

	s.images[id].layers = layers
	return foreignSrcs, nil
}
func (cs *ContentStore) GetPath(dgst digest.Digest) (string, error) {
	p := filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
	if _, err := os.Stat(p); err != nil {
		if os.IsNotExist(err) {
			return "", ErrBlobNotFound
		}
		return "", err
	}

	return p, nil
}
func (s *saveSession) saveImage(id image.ID) error {
	img, err := s.is.Get(id)
	if err != nil {
		return err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	for i := range img.RootFS.DiffIDs {
		v1Img := image.V1Image{}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		if err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created); err != nil {
			return err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
	}

	configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return err
	}
	if err := os.Chtimes(configFile, img.Created, img.Created); err != nil {
		return err
	}

	s.images[id].layers = layers
	return nil
}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
//	<algorithm>/<hex digest>
//
// If multilevel is true, the first two bytes of the digest will separate
// groups of digest folder. It will be as follows:
//
//	<algorithm>/<first two bytes of digest>/<full digest>
//
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
	if err := dgst.Validate(); err != nil {
		return nil, err
	}

	algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
	hex := dgst.Hex()
	prefix := []string{algorithm}

	var suffix []string

	if multilevel {
		suffix = append(suffix, hex[:2])
	}

	suffix = append(suffix, hex)

	return append(prefix, suffix...), nil
}
func checkBlobPath(t *testing.T, cs *ContentStore, dgst digest.Digest) string {
	path, err := cs.GetPath(dgst)
	if err != nil {
		t.Fatal(err, dgst)
	}
	if path != filepath.Join(cs.root, "blobs", dgst.Algorithm().String(), dgst.Hex()) {
		t.Fatalf("unexpected path: %q", path)
	}
	fi, err := os.Stat(path)
	if err != nil {
		t.Fatalf("error stat-ing blob path: %v", err)
	}

	// ensure that only read bits are set.
	if ((fi.Mode() & os.ModePerm) & 0333) != 0 {
		t.Fatalf("incorrect permissions: %v", fi.Mode())
	}

	return path
}
func (b *buildCache) getParentChain(ctx context.Context, dir string, id digest.Digest) ([]image, error) {
	config, err := ioutil.ReadFile(filepath.Join(dir, "imagedb/content/sha256", id.Hex()))
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	if err != nil {
		return nil, err
	}

	img, err := parseImage(config)
	if err != nil {
		return nil, err
	}
	if img.id != id {
		return nil, fmt.Errorf("invalid configuration for %v, got id %v", id, img.id)
	}

	parent, err := ioutil.ReadFile(filepath.Join(dir, "imagedb/metadata/sha256", id.Hex(), "parent"))
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	if err != nil {
		if os.IsNotExist(err) {
			return []image{*img}, nil
		}
		return nil, err
	}

	parentID, err := digest.ParseDigest(string(parent))
	if err != nil {
		return nil, err
	}
	img.parent = parentID

	pc, err := b.getParentChain(ctx, dir, parentID)
	if err != nil {
		return nil, err
	}
	return append([]image{*img}, pc...), nil
}
// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(cs *ContentStore, r io.Reader, size int64, expected digest.Digest) error {
	cw, err := cs.Begin(expected.Hex())
	if err != nil {
		return err
	}
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf)

	nn, err := io.CopyBuffer(cw, r, buf)
	if err != nil {
		return err
	}

	if nn != size {
		return errors.Errorf("failed size verification: %v != %v", nn, size)
	}

	if err := cw.Commit(size, expected); err != nil {
		return err
	}

	return nil
}
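A minimal caller sketch for WriteBlob, since the doc comment requires the digest and size to be known up front. The data is made up for the example, and the *ContentStore is assumed to come from the surrounding package (with bytes imported); this is an illustration, not part of the original code.

// Illustration only: assumes WriteBlob and a ready *ContentStore from the
// surrounding package; the blob content here is a placeholder.
func writeExampleBlob(cs *ContentStore) error {
	data := []byte("hello world")

	// WriteBlob verifies both the size and the digest of what it copies.
	expected := digest.FromBytes(data)
	size := int64(len(data))

	return WriteBlob(cs, bytes.NewReader(data), size, expected)
}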
func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) {
	return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex()))
}
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, r remotes.Remotes) (RootCA, error) {
	// This TLS Config is intentionally using InsecureSkipVerify. We use the
	// digest instead to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	conn, peer, err := getGRPCConnection(insecureCreds, r)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	defer func() {
		if err != nil {
			r.Observe(peer, -remotes.DefaultObservationWeight)
			return
		}
		r.Observe(peer, remotes.DefaultObservationWeight)
	}()
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier")
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, errors.New("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil
}
// Build produces a final manifest from the given references
func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) {
	type imageRootFS struct {
		Type      string   `json:"type"`
		DiffIDs   []diffID `json:"diff_ids,omitempty"`
		BaseLayer string   `json:"base_layer,omitempty"`
	}

	type imageHistory struct {
		Created    time.Time `json:"created"`
		Author     string    `json:"author,omitempty"`
		CreatedBy  string    `json:"created_by,omitempty"`
		Comment    string    `json:"comment,omitempty"`
		EmptyLayer bool      `json:"empty_layer,omitempty"`
	}

	type imageConfig struct {
		RootFS       *imageRootFS   `json:"rootfs,omitempty"`
		History      []imageHistory `json:"history,omitempty"`
		Architecture string         `json:"architecture,omitempty"`
	}

	var img imageConfig

	if err := json.Unmarshal(mb.configJSON, &img); err != nil {
		return nil, err
	}

	if len(img.History) == 0 {
		return nil, errors.New("empty history when trying to create schema1 manifest")
	}

	if len(img.RootFS.DiffIDs) != len(mb.descriptors) {
		return nil, fmt.Errorf("number of descriptors and number of layers in rootfs must match: len(%v) != len(%v)", img.RootFS.DiffIDs, mb.descriptors)
	}

	// Generate IDs for each layer
	// For non-top-level layers, create fake V1Compatibility strings that
	// fit the format and don't collide with anything else, but don't
	// result in runnable images on their own.
	type v1Compatibility struct {
		ID              string    `json:"id"`
		Parent          string    `json:"parent,omitempty"`
		Comment         string    `json:"comment,omitempty"`
		Created         time.Time `json:"created"`
		ContainerConfig struct {
			Cmd []string
		} `json:"container_config,omitempty"`
		Author    string `json:"author,omitempty"`
		ThrowAway bool   `json:"throwaway,omitempty"`
	}

	fsLayerList := make([]FSLayer, len(img.History))
	history := make([]History, len(img.History))

	parent := ""
	layerCounter := 0
	for i, h := range img.History[:len(img.History)-1] {
		var blobsum digest.Digest
		if h.EmptyLayer {
			if blobsum, err = mb.emptyTar(ctx); err != nil {
				return nil, err
			}
		} else {
			if len(img.RootFS.DiffIDs) <= layerCounter {
				return nil, errors.New("too many non-empty layers in History section")
			}
			blobsum = mb.descriptors[layerCounter].Digest
			layerCounter++
		}

		v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()

		if i == 0 && img.RootFS.BaseLayer != "" {
			// windows-only baselayer setup
			baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer))
			parent = fmt.Sprintf("%x", baseID[:32])
		}

		v1Compatibility := v1Compatibility{
			ID:      v1ID,
			Parent:  parent,
			Comment: h.Comment,
			Created: h.Created,
			Author:  h.Author,
		}
		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}

		if h.EmptyLayer {
			v1Compatibility.ThrowAway = true
		}

		jsonBytes, err := json.Marshal(&v1Compatibility)
		if err != nil {
			return nil, err
		}

		reversedIndex := len(img.History) - i - 1
		history[reversedIndex].V1Compatibility = string(jsonBytes)
		fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum}

		parent = v1ID
	}

	latestHistory := img.History[len(img.History)-1]

	var blobsum digest.Digest
	if latestHistory.EmptyLayer {
		if blobsum, err = mb.emptyTar(ctx); err != nil {
			return nil, err
		}
	} else {
		if len(img.RootFS.DiffIDs) <= layerCounter {
			return nil, errors.New("too many non-empty layers in History section")
		}
		blobsum = mb.descriptors[layerCounter].Digest
	}

	fsLayerList[0] = FSLayer{BlobSum: blobsum}
	dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON)))

	// Top-level v1compatibility string should be a modified version of the
	// image config.
	transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer)
	if err != nil {
		return nil, err
	}

	history[0].V1Compatibility = string(transformedConfig)

	tag := ""
	if tagged, isTagged := mb.ref.(reference.Tagged); isTagged {
		tag = tagged.Tag()
	}

	mfst := Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name:         mb.ref.Name(),
		Tag:          tag,
		Architecture: img.Architecture,
		FSLayers:     fsLayerList,
		History:      history,
	}

	return Sign(&mfst, mb.pk)
}
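A small worked example of the fake v1 layer ID chaining used by Build above: each ID is the sha256 of "<blobsum hex> <parent id>", so every layer's ID depends on all of its ancestors. The digests below are placeholders and the helper is hypothetical, assuming the fmt and digest imports of the surrounding file.

// Illustration only: mirrors the v1ID construction in Build; the blobsums are
// placeholder values, not real layer digests.
func exampleV1IDChain() {
	blobsums := []digest.Digest{
		"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
		"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4",
	}

	parent := ""
	for _, blobsum := range blobsums {
		// Same construction as in Build: sha256 over "<blobsum hex> <parent>".
		v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex()
		fmt.Println(v1ID)
		parent = v1ID
	}
}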
func (ic *ImageCache) imageFile(dgst digest.Digest) string {
	return filepath.Join(ic.root, dgst.Algorithm().String(), dgst.Hex())
}
func (serv *v2MetadataService) digestKey(dgst digest.Digest) string {
	return string(dgst.Algorithm()) + "/" + dgst.Hex()
}
func (s *fs) contentFile(dgst digest.Digest) string {
	return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex())
}
func (s *fs) metadataDir(dgst digest.Digest) string {
	return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex())
}
func (blobserv *BlobSumService) blobSumKey(blobsum digest.Digest) string {
	return string(blobsum.Algorithm()) + "/" + blobsum.Hex()
}
// GetRemoteCA returns the remote endpoint's CA certificate
func GetRemoteCA(ctx context.Context, d digest.Digest, r remotes.Remotes) (RootCA, error) {
	// This TLS Config is intentionally using InsecureSkipVerify. Either we're
	// doing TOFU, in which case we don't validate the remote CA, or we're using
	// a user supplied hash to check the integrity of the CA certificate.
	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecureCreds),
		grpc.WithTimeout(5 * time.Second),
		grpc.WithBackoffMaxDelay(5 * time.Second),
	}

	peer, err := r.Select()
	if err != nil {
		return RootCA{}, err
	}

	conn, err := grpc.Dial(peer.Addr, opts...)
	if err != nil {
		return RootCA{}, err
	}
	defer conn.Close()

	client := api.NewCAClient(conn)
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	defer func() {
		if err != nil {
			r.Observe(peer, -remotes.DefaultObservationWeight)
			return
		}
		r.Observe(peer, remotes.DefaultObservationWeight)
	}()
	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
	if err != nil {
		return RootCA{}, err
	}

	if d != "" {
		verifier, err := digest.NewDigestVerifier(d)
		if err != nil {
			return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier")
		}

		io.Copy(verifier, bytes.NewReader(response.Certificate))

		if !verifier.Verified() {
			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
		}
	}

	// Check the validity of the remote Cert
	_, err = helpers.ParseCertificatePEM(response.Certificate)
	if err != nil {
		return RootCA{}, err
	}

	// Create a Pool with our RootCACertificate
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(response.Certificate) {
		return RootCA{}, errors.New("failed to append certificate to cert pool")
	}

	return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil
}
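A sketch of the trust-on-first-use (TOFU) pattern the comment above describes: the first call passes an empty digest and trusts whatever CA comes back, later calls pin the learned fingerprint. The wrapper function is hypothetical, the remotes.Remotes value is assumed to be already configured with manager addresses, and error handling is minimal.

// Illustration only: assumes GetRemoteCA and RootCA from above and an
// already-configured remotes.Remotes value.
func trustRemoteCA(ctx context.Context, r remotes.Remotes) (RootCA, error) {
	// First contact: no digest pin, so the returned CA is trusted on first use.
	rootCA, err := GetRemoteCA(ctx, "", r)
	if err != nil {
		return RootCA{}, err
	}

	// Pin the fingerprint we just learned; subsequent fetches must match it.
	pin := digest.FromBytes(rootCA.Cert)
	return GetRemoteCA(ctx, pin, r)
}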