func (p *v2Pusher) pushLayerIfNecessary(out io.Writer, l layer.Layer) (digest.Digest, error) {
	logrus.Debugf("Pushing layer: %s", l.DiffID())

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := p.blobSumService.GetBlobSums(l.DiffID())
	if err == nil {
		dgst, exists, err := p.blobSumAlreadyExists(possibleBlobsums)
		if err != nil {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Image push failed", nil))
			return "", err
		}
		if exists {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(string(l.DiffID())), "Layer already exists", nil))
			return dgst, nil
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	pushDigest, err := p.pushV2Layer(p.repo.Blobs(context.Background()), l)
	if err != nil {
		return "", err
	}

	// Cache mapping from this layer's DiffID to the blobsum
	if err := p.blobSumService.Add(l.DiffID(), pushDigest); err != nil {
		return "", err
	}

	return pushDigest, nil
}

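// NOTE: blobSumAlreadyExists is called above but is not part of this excerpt.
// The sketch below is an illustrative assumption of how such a check could work,
// not necessarily the exact implementation: Stat each candidate blobsum against
// the target repository and report the first digest that is already present.
// distribution.ErrBlobUnknown is taken to signal a missing blob; any other error
// aborts the check.
func (p *v2Pusher) blobSumAlreadyExists(blobsums []digest.Digest) (digest.Digest, bool, error) {
	for _, dgst := range blobsums {
		_, err := p.repo.Blobs(context.Background()).Stat(context.Background(), dgst)
		switch err {
		case nil:
			// The blob is already in the target repository; no push needed.
			return dgst, true, nil
		case distribution.ErrBlobUnknown:
			// Not present under this blobsum; try the next candidate.
		default:
			return "", false, err
		}
	}
	return "", false, nil
}
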
func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) {
	img, err := p.config.ImageStore.Get(imgID)
	if err != nil {
		return nil, err
	}

	topLayerID := img.RootFS.ChainID()

	var l layer.Layer
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		*referencedLayers = append(*referencedLayers, l)
		if err != nil {
			return nil, fmt.Errorf("failed to get top layer from image: %v", err)
		}
	}

	dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen)
	if err != nil {
		return nil, err
	}

	topImage, err := newV1TopImage(imgID, img, l, parent)
	if err != nil {
		return nil, err
	}

	imageListForThisTag = append(dependencyImages, topImage)

	return
}

func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) {
	v1ID := digest.Digest(l.ChainID()).Hex()

	config := ""
	if parent != nil {
		config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID())
	} else {
		config = fmt.Sprintf(`{"id":"%s"}`, v1ID)
	}
	return &v1DependencyImage{
		v1ImageCommon: v1ImageCommon{
			v1ID:   v1ID,
			config: []byte(config),
			layer:  l,
		},
	}, nil
}

func (p *v2Pusher) pushV2Layer(bs distribution.BlobService, l layer.Layer) (digest.Digest, error) {
	out := p.config.OutStream
	displayID := stringid.TruncateID(string(l.DiffID()))

	out.Write(p.sf.FormatProgress(displayID, "Preparing", nil))

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,
		Size:      size,
		NewLines:  false,
		ID:        displayID,
		Action:    "Pushing",
	})

	compressedReader := compress(reader)

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	out.Write(p.sf.FormatProgress(displayID, "Pushing", nil))
	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", l.DiffID(), dgst, nn)
	out.Write(p.sf.FormatProgress(displayID, "Pushed", nil))

	return dgst, nil
}

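// NOTE: compress is used above but not included in this excerpt. Below is a minimal
// sketch, assuming a gzip stream fed through an io.Pipe so the upload can read while
// compression runs in the background; the buffer size is an illustrative value.
// (Uses "bufio", "compress/gzip", and "io" from the standard library.)
func compress(in io.Reader) io.ReadCloser {
	pipeReader, pipeWriter := io.Pipe()
	// Buffer writes to avoid excessive chunking in the HTTP request body.
	bufWriter := bufio.NewWriterSize(pipeWriter, 32*1024)
	compressor := gzip.NewWriter(bufWriter)

	go func() {
		_, err := io.Copy(compressor, in)
		if err == nil {
			err = compressor.Close()
		}
		if err == nil {
			err = bufWriter.Flush()
		}
		if err != nil {
			pipeWriter.CloseWithError(err)
		} else {
			pipeWriter.Close()
		}
	}()

	return pipeReader
}
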
func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) {
	if l == nil {
		return nil, nil, nil
	}

	imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen)

	if dependenciesSeen != nil {
		if dependencyImage, present := dependenciesSeen[l.ChainID()]; present {
			// This layer is already on the list, we can ignore it
			// and all its parents.
			return imageListForThisTag, dependencyImage, nil
		}
	}

	dependencyImage, err := newV1DependencyImage(l, parent)
	if err != nil {
		return nil, nil, err
	}
	imageListForThisTag = append(imageListForThisTag, dependencyImage)

	if dependenciesSeen != nil {
		dependenciesSeen[l.ChainID()] = dependencyImage
	}

	return imageListForThisTag, dependencyImage, nil
}

// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross-repository mount attempts from
// source repositories of the target registry, the maximum number of layer existence checks performed on the
// target repository, and whether the existence check shall also be done with digests mapped to other
// repositories. The decision is based on layer size: the smaller the layer, the fewer attempts are made,
// because the cost of an upload does not outweigh the latency of the extra requests.
func getMaxMountAndExistenceCheckAttempts(layer layer.Layer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size, err := layer.DiffSize()
	switch {
	// big blob
	case size > middleLayerMaximumSize:
		// 1. attempt to mount the blob a few times
		// 2. perform a few existence checks with digests associated to any repository
		// then fall back to upload
		return 4, 3, true

	// middle-sized blobs; if we could not get the size, assume we are dealing with a middle-sized blob
	case size > smallLayerMaximumSize, err != nil:
		// 1. attempt to mount blobs of average size a few times
		// 2. try at most one existence check if there's an existing mapping to the target repository
		// then fall back to upload
		return 3, 1, false

	// small blobs, do a minimum number of checks
	default:
		return 1, 1, false
	}
}

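// NOTE: smallLayerMaximumSize and middleLayerMaximumSize are referenced above but
// defined elsewhere in the package. The values below are assumptions for this
// excerpt, chosen so that blobs up to ~100KB count as small and blobs up to ~10MB
// as middle-sized.
const (
	// smallLayerMaximumSize is the maximum size of a layer considered small.
	smallLayerMaximumSize = 100 * (1 << 10) // 100KB
	// middleLayerMaximumSize is the maximum size of a layer considered middle-sized.
	middleLayerMaximumSize = 10 * (1 << 20) // 10MB
)
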
func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error {
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(imageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		v2MetadataService: p.v2MetadataService,
		repoInfo:          p.repoInfo,
		repo:              p.repo,
		pushState:         &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	// Try schema2 first
	builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON())
	manifest, err := manifestFromBuilder(ctx, builder, descriptors)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	putOptions := []distribution.ManifestServiceOption{client.WithTag(ref.Tag())}
	if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
		logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err)

		manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag())
		if err != nil {
			return err
		}
		builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON())
		manifest, err = manifestFromBuilder(ctx, builder, descriptors)
		if err != nil {
			return err
		}

		if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil {
			return err
		}
	}

	var canonicalManifest []byte

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		canonicalManifest = v.Canonical
	case *schema2.DeserializedManifest:
		_, canonicalManifest, err = v.Payload()
		if err != nil {
			return err
		}
	}

	manifestDigest := digest.FromBytes(canonicalManifest)
	progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest))
	// Signal digest to the trust client so it can sign the
	// push, if appropriate.
	progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)})

	return nil
}

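// NOTE: manifestFromBuilder is called above but not included in this excerpt. A
// minimal sketch, assuming *v2PushDescriptor implements distribution.Describable:
// descriptors are built from the top layer down in pushV2Tag, so they are appended
// to the builder in reverse so that manifest references start from the base layer.
func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) {
	// descriptors is in reverse order; iterate backwards to append references
	// in the order the manifest expects.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return nil, err
		}
	}
	return builder.Build(ctx)
}
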
func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
	ref := association.Ref
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(association.ImageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		blobSumService: p.blobSumService,
		repo:           p.repo,
		layersPushed:   &p.layersPushed,
		confirmedV2:    &p.confirmedV2,
	}

	// Push empty layer if necessary
	for _, h := range img.History {
		if h.EmptyLayer {
			descriptor := descriptorTemplate
			descriptor.layer = layer.EmptyLayer
			descriptors = []xfer.UploadDescriptor{&descriptor}
			break
		}
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
	if err != nil {
		return err
	}

	var tag string
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tag = tagged.Tag()
	}

	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
	if err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
	signed, err := schema1.Sign(m, p.config.TrustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
			// NOTE: do not change this format without first changing the trust client
			// code. This information is used to determine what was pushed and should be signed.
			progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize)
		}
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}

func (p *v2Pusher) pushV2Tag(association tag.Association) error {
	ref := association.Ref
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(association.ImageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	out := p.config.OutStream

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	fsLayers := make(map[layer.DiffID]schema1.FSLayer)

	// Push empty layer if necessary
	for _, h := range img.History {
		if h.EmptyLayer {
			dgst, err := p.pushLayerIfNecessary(out, layer.EmptyLayer)
			if err != nil {
				return err
			}
			p.layersPushed[dgst] = true
			fsLayers[layer.EmptyLayer.DiffID()] = schema1.FSLayer{BlobSum: dgst}
			break
		}
	}

	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		dgst, err := p.pushLayerIfNecessary(out, l)
		if err != nil {
			return err
		}

		p.layersPushed[dgst] = true
		fsLayers[l.DiffID()] = schema1.FSLayer{BlobSum: dgst}

		l = l.Parent()
	}

	var tag string
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tag = tagged.Tag()
	}

	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
	if err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
	signed, err := schema1.Sign(m, p.config.TrustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, p.repo.Name())
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		if tagged, isTagged := ref.(reference.Tagged); isTagged {
			// NOTE: do not change this format without first changing the trust client
			// code. This information is used to determine what was pushed and should be signed.
			out.Write(p.sf.FormatStatus("", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize))
		}
	}

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return err
	}
	return manSvc.Put(signed)
}