func (p *v2Pusher) pushV2Layer(bs distribution.BlobService, l layer.Layer) (digest.Digest, error) {
	out := p.config.OutStream
	displayID := stringid.TruncateID(string(l.DiffID()))

	out.Write(p.sf.FormatProgress(displayID, "Preparing", nil))

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}

	// Send the layer
	layerUpload, err := bs.Create(context.Background())
	if err != nil {
		return "", err
	}
	defer layerUpload.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	reader := progressreader.New(progressreader.Config{
		In:        ioutil.NopCloser(arch), // we'll take care of close here.
		Out:       out,
		Formatter: p.sf,
		Size:      size,
		NewLines:  false,
		ID:        displayID,
		Action:    "Pushing",
	})

	compressedReader := compress(reader)

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	out.Write(p.sf.FormatProgress(displayID, "Pushing", nil))
	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", err
	}

	dgst := digester.Digest()
	if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
		return "", err
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", l.DiffID(), dgst, nn)
	out.Write(p.sf.FormatProgress(displayID, "Pushed", nil))

	return dgst, nil
}
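// Illustrative sketch, not part of the original file: the pattern above, where
// io.TeeReader mirrors every byte of the upload into a digester so the layer
// digest is computed in the same pass as the push. The helper name
// uploadAndDigest and its io.ReaderFrom parameter are assumptions made for this
// example; it relies only on the io and digest packages already used above.
func uploadAndDigest(dst io.ReaderFrom, compressed io.Reader) (digest.Digest, int64, error) {
	digester := digest.Canonical.New()
	// Bytes consumed by dst.ReadFrom are also fed to the digester's hash.
	tee := io.TeeReader(compressed, digester.Hash())
	nn, err := dst.ReadFrom(tee)
	if err != nil {
		return "", nn, err
	}
	return digester.Digest(), nn, nil
}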
// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross repository mount attempts from
// source repositories of the target registry, the maximum number of layer existence checks performed on the
// target repository, and whether the existence check should also be done with digests mapped to different
// repositories. The decision is based on layer size: the smaller the layer, the fewer attempts are made,
// because the cost of an upload does not outweigh the latency of the checks.
func getMaxMountAndExistenceCheckAttempts(layer layer.Layer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) {
	size, err := layer.DiffSize()
	switch {
	// big blobs
	case size > middleLayerMaximumSize:
		// first, attempt to mount the blob a few times
		// then do a few existence checks with digests associated with any repository
		// finally, fall back to upload
		return 4, 3, true

	// middle-sized blobs; if we could not get the size, assume a middle-sized blob
	case size > smallLayerMaximumSize, err != nil:
		// first, attempt to mount the blob a few times
		// then try at most one existence check if there's an existing mapping to the target repository
		// finally, fall back to upload
		return 3, 1, false

	// small blobs: do a minimal number of checks
	default:
		return 1, 1, false
	}
}
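// The thresholds referenced above, smallLayerMaximumSize and middleLayerMaximumSize,
// are defined elsewhere in this package. Purely for illustration (the actual values
// may differ), they could look like:
//
//	const (
//		smallLayerMaximumSize  = 100 * (1 << 10) // blobs up to ~100KB get only minimal checks
//		middleLayerMaximumSize = 10 * (1 << 20)  // blobs above ~10MB justify more mount attempts
//	)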