Example #1
func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) {
	l := v1Image.Layer()
	v1ID := v1Image.V1ID()
	truncID := stringid.TruncateID(l.DiffID().String())

	jsonRaw := v1Image.Config()
	progress.Update(p.config.ProgressOutput, truncID, "Pushing")

	// General rule is to use ID for graph accesses and compatibilityID for
	// calls to session.registry()
	imgData := &registry.ImgData{
		ID: v1ID,
	}

	// Send the json
	if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil {
		if err == registry.ErrAlreadyExists {
			progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping")
			return "", nil
		}
		return "", err
	}

	arch, err := l.TarStream()
	if err != nil {
		return "", err
	}
	defer arch.Close()

	// don't care if this fails; best effort
	size, _ := l.DiffSize()

	// Send the layer
	logrus.Debugf("rendered layer for %s, size %d", v1ID, size)

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing")
	defer reader.Close()

	checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw)
	if err != nil {
		return "", err
	}
	imgData.Checksum = checksum
	imgData.ChecksumPayload = checksumPayload
	// Send the checksum
	if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil {
		return "", err
	}

	if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil {
		logrus.Warnf("Could not set v1 ID mapping: %v", err)
	}

	progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed")
	return imgData.Checksum, nil
}
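The early return on registry.ErrAlreadyExists above turns a sentinel error into a success path rather than a failure. A minimal sketch of that pattern, assuming a hypothetical pushJSON in place of session.PushImageJSONRegistry:

package main

import (
	"errors"
	"fmt"
)

var errAlreadyExists = errors.New("already exists") // stand-in for registry.ErrAlreadyExists

// pushJSON is a hypothetical stand-in for session.PushImageJSONRegistry.
func pushJSON() error { return errAlreadyExists }

func main() {
	if err := pushJSON(); err != nil {
		if err == errAlreadyExists {
			fmt.Println("Image already pushed, skipping") // treated as success
			return
		}
		fmt.Println("push failed:", err)
		return
	}
	fmt.Println("pushed")
}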
Example #2
func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error {
	if img.Tag == "" {
		logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
		return nil
	}

	localNameRef, err := reference.WithTag(p.repoInfo.LocalName, img.Tag)
	if err != nil {
		retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag)
		logrus.Debug(retErr.Error())
		return retErr
	}

	if err := v1.ValidateID(img.ID); err != nil {
		return err
	}

	progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.CanonicalName.Name())
	success := false
	var lastErr error
	for _, ep := range p.repoInfo.Index.Mirrors {
		ep += "v1/"
		progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep)
		if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
			// Don't report errors when pulling from mirrors.
			logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err)
			continue
		}
		success = true
		break
	}
	if !success {
		for _, ep := range repoData.Endpoints {
			progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep)
			if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil {
			// It's not ideal that only the last error is returned; it would be better to concatenate the errors.
			// As the error is also written to the output stream, the user will see it.
				lastErr = err
				progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.CanonicalName.Name(), ep, err)
				continue
			}
			success = true
			break
		}
	}
	if !success {
		err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.CanonicalName.Name(), lastErr)
		progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error())
		return err
	}
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Download complete")
	return nil
}
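The flow above tries every registry mirror quietly (errors only logged at debug level) before falling back to the repository's advertised endpoints, where errors are surfaced. A self-contained sketch of that ordering; tryEndpoints and the URLs are illustrative, not part of the original code:

package main

import (
	"errors"
	"fmt"
)

// tryEndpoints pulls from each mirror first, then from each endpoint,
// returning nil on the first success and the last error otherwise.
func tryEndpoints(mirrors, endpoints []string, pull func(ep string) error) error {
	var lastErr error
	for _, ep := range mirrors {
		if err := pull(ep + "v1/"); err != nil {
			continue // mirror errors are not reported, matching the code above
		}
		return nil
	}
	for _, ep := range endpoints {
		if err := pull(ep); err != nil {
			lastErr = err // only the last endpoint error is kept, as above
			continue
		}
		return nil
	}
	if lastErr == nil {
		lastErr = errors.New("no endpoints to pull from")
	}
	return lastErr
}

func main() {
	err := tryEndpoints(
		[]string{"https://mirror.example.com/"},
		[]string{"https://registry.example.com/v1/"},
		func(ep string) error { return fmt.Errorf("unreachable: %s", ep) },
	)
	fmt.Println(err)
}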
Example #3
// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and, if it is absent, sends the image ID to the channel to be pushed.
func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) {
	defer wg.Done()
	for image := range images {
		v1ID := image.V1ID()
		truncID := stringid.TruncateID(image.Layer().DiffID().String())
		if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil {
			logrus.Errorf("Error in LookupRemoteImage: %s", err)
			imagesToPush <- v1ID
			progress.Update(p.config.ProgressOutput, truncID, "Waiting")
		} else {
			progress.Update(p.config.ProgressOutput, truncID, "Already exists")
		}
	}
}
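lookupImageOnEndpoint is run once per worker goroutine, all draining the shared images channel. The same fan-out shape reduced to the standard library; the worker body here is a stand-in for session.LookupRemoteImage:

package main

import (
	"fmt"
	"sync"
)

func main() {
	ids := make(chan string)
	toPush := make(chan string, 8) // buffered so workers never block on send

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // four lookup workers, as if per endpoint
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range ids {
				// pretend the remote lookup failed, so the image needs pushing
				toPush <- id
			}
		}()
	}

	go func() {
		for _, id := range []string{"a1b2", "c3d4", "e5f6"} {
			ids <- id
		}
		close(ids) // lets the worker range loops terminate
	}()

	wg.Wait()
	close(toPush)
	for id := range toPush {
		fmt.Println("push", id)
	}
}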
Example #4
func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	progress.Update(progressOutput, ld.ID(), "Pulling fs layer")
	layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize)
	if err != nil {
		progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers")
		if uerr, ok := err.(*url.Error); ok {
			err = uerr.Err
		}
		if terr, ok := err.(net.Error); ok && terr.Timeout() {
			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}
	*ld.layersDownloaded = true

	ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		layerReader.Close()
		return nil, 0, err
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
	defer reader.Close()

	_, err = io.Copy(ld.tmpFile, reader)
	if err != nil {
		ld.Close()
		return nil, 0, err
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

	ld.tmpFile.Seek(0, 0)

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	tmpFile := ld.tmpFile
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), ld.layerSize, nil
}
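The wrapper returned at the end ties the temporary file's lifetime to a single Close: closing the reader also removes the file from disk. A sketch of an equivalent wrapper built only from the standard library (ioutils.NewReadCloserWrapper does essentially this):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
)

// readCloserWrapper runs a custom close function when Close is called,
// a stdlib stand-in for ioutils.NewReadCloserWrapper.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func main() {
	tmp, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		panic(err)
	}
	io.Copy(tmp, strings.NewReader("layer bytes"))
	tmp.Seek(0, 0)

	rc := &readCloserWrapper{Reader: tmp, closer: func() error {
		tmp.Close()
		return os.Remove(tmp.Name()) // the tempfile disappears with its one Close
	}}
	data, _ := ioutil.ReadAll(rc)
	fmt.Println(string(data), rc.Close())
}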
Example #5
// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads          []*uploadTransfer
		dedupDescriptors = make(map[string]struct{})
	)

	for _, descriptor := range layers {
		progress.Update(progressOutput, descriptor.ID(), "Preparing")

		key := descriptor.Key()
		if _, present := dedupDescriptors[key]; present {
			continue
		}
		dedupDescriptors[key] = struct{}{}

		xferFunc := lum.makeUploadFunc(descriptor)
		upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput)
		defer upload.Release(watcher)
		uploads = append(uploads, upload.(*uploadTransfer))
	}

	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return upload.err
			}
		}
	}

	return nil
}
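Deduplication rests on the set keyed by descriptor.Key(): two layers with identical content map to a single transfer, and later callers simply attach to the one in flight. The core bookkeeping as a runnable snippet:

package main

import "fmt"

func main() {
	keys := []string{"sha256:aa", "sha256:bb", "sha256:aa"}

	dedup := make(map[string]struct{})
	var uploads []string
	for _, k := range keys {
		if _, present := dedup[k]; present {
			continue // identical content; reuse the in-flight transfer
		}
		dedup[k] = struct{}{}
		uploads = append(uploads, k)
	}
	fmt.Println(uploads) // [sha256:aa sha256:bb]
}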
Example #6
func (pd *v2PushDescriptor) uploadUsingSession(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	layerUpload distribution.BlobWriter,
) (distribution.Descriptor, error) {
	arch, err := pd.layer.TarStream()
	if err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	compressedReader, compressionDone := compress(reader)
	defer func() {
		reader.Close()
		<-compressionDone
	}()

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
		Digest:           pushDigest,
		SourceRepository: pd.repoInfo.FullName(),
	}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	desc := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Lock()
	// If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol.
	pd.pushState.confirmedV2 = true
	pd.pushState.remoteLayers[diffID] = desc
	pd.pushState.Unlock()

	return desc, nil
}
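compress hands back both a reader of compressed bytes and a channel closed when the compressing goroutine exits, which is why the deferred cleanup waits on compressionDone before returning. A sketch of a helper with that shape built from gzip and io.Pipe; it illustrates the mechanism and is not the original implementation:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	pr, pw := io.Pipe()
	done := make(chan struct{})
	go func() {
		defer close(done) // signals the goroutine has fully exited
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, in)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // propagate failures to the reading side
	}()
	return pr, done
}

func main() {
	r, done := compress(strings.NewReader("layer tar bytes"))
	out, _ := ioutil.ReadAll(r)
	r.Close()
	<-done // mirror the deferred wait in the example above
	fmt.Println(len(out), "compressed bytes")
}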
Example #7
func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) {
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata")

	retries := 5
	for j := 1; j <= retries; j++ {
		imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint)
		if err != nil && j == retries {
			progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata")
			return nil, 0, err
		} else if err != nil {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
			continue
		}

		return imgJSON, imgSize, nil
	}

	// not reached
	return nil, 0, nil
}
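The loop retries the metadata fetch up to five times with a linearly growing sleep (500ms, 1s, 1.5s, ...) between attempts. The same policy extracted into a reusable helper:

package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetries runs fn up to retries times, sleeping j*500ms after the
// j-th failure, and returns the last error if all attempts fail.
func withRetries(retries int, fn func() error) error {
	var err error
	for j := 1; j <= retries; j++ {
		if err = fn(); err == nil {
			return nil
		}
		if j < retries {
			time.Sleep(time.Duration(j) * 500 * time.Millisecond)
		}
	}
	return err
}

func main() {
	attempt := 0
	err := withRetries(5, func() error {
		attempt++
		if attempt < 3 {
			return errors.New("transient registry error")
		}
		return nil
	})
	fmt.Println(attempt, err) // 3 <nil>
}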
Example #8
func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	progress.Update(progressOutput, ld.ID(), "Pulling fs layer")
	layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize)
	if err != nil {
		progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers")
		if uerr, ok := err.(*url.Error); ok {
			err = uerr.Err
		}
		if terr, ok := err.(net.Error); ok && terr.Timeout() {
			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}
	*ld.layersDownloaded = true

	ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		layerReader.Close()
		return nil, 0, err
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading")
	defer reader.Close()

	_, err = io.Copy(ld.tmpFile, reader)
	if err != nil {
		ld.Close()
		return nil, 0, err
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name())

	ld.tmpFile.Seek(0, 0)
	return ld.tmpFile, ld.layerSize, nil
}
Example #9
// WriteImageBlobs writes the image blobs to the storage layer
func WriteImageBlobs(images []*ImageWithMeta) error {
	if options.standalone {
		return nil
	}

	// iterate from parent to children
	// so that portlayer can extract each layer
	// on top of previous one
	destination := DestinationDirectory()
	for i := len(images) - 1; i >= 0; i-- {
		image := images[i]

		id := image.Image.ID
		f, err := os.Open(path.Join(destination, id, id+".tar"))
		if err != nil {
			return fmt.Errorf("Failed to open file: %s", err)
		}
		defer f.Close()

		fi, err := f.Stat()
		if err != nil {
			return fmt.Errorf("Failed to stat file: %s", err)
		}

		in := progress.NewProgressReader(
			ioutils.NewCancelReadCloser(context.Background(), f),
			po,
			fi.Size(),
			image.String(),
			"Extracting",
		)
		defer in.Close()

		// Write the image
		err = WriteImage(image, in)
		if err != nil {
			return fmt.Errorf("Failed to write to image store: %s", err)
		}
		progress.Update(po, image.String(), "Pull complete")
	}
	if err := os.RemoveAll(destination); err != nil {
		return fmt.Errorf("Failed to remove download directory: %s", err)
	}
	return nil
}
Example #10
File: imagec.go Project: vmware/vic
// WriteImageBlob writes the image blob to the storage layer
func (ic *ImageC) WriteImageBlob(image *ImageWithMeta, progressOutput progress.Output, cleanup bool) error {
	defer trace.End(trace.Begin(image.Image.ID))

	destination := DestinationDirectory(ic.Options)

	id := image.Image.ID
	log.Infof("Path: %s", path.Join(destination, id, id+".tar"))
	f, err := os.Open(path.Join(destination, id, id+".tar"))
	if err != nil {
		return fmt.Errorf("Failed to open file: %s", err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return fmt.Errorf("Failed to stat file: %s", err)
	}

	in := progress.NewProgressReader(
		ioutils.NewCancelReadCloser(context.Background(), f),
		progressOutput,
		fi.Size(),
		image.String(),
		"Extracting",
	)
	defer in.Close()

	// Write the image
	err = WriteImage(ic.Host, image, in)
	if err != nil {
		return fmt.Errorf("Failed to write to image store: %s", err)
	}

	progress.Update(progressOutput, image.String(), "Pull complete")

	if cleanup {
		if err := os.RemoveAll(destination); err != nil {
			return fmt.Errorf("Failed to remove download directory: %s", err)
		}
	}
	return nil
}
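progress.NewProgressReader wraps the file so each Read is reported against the total from fi.Size(). A stdlib stand-in showing the counting idea; the real implementation batches and formats its updates differently:

package main

import (
	"fmt"
	"io"
	"strings"
)

// progressReader counts bytes as they pass through and reports them
// against a known total, roughly what progress.NewProgressReader does.
type progressReader struct {
	r          io.Reader
	total, cur int64
	id, action string
}

func (p *progressReader) Read(b []byte) (int, error) {
	n, err := p.r.Read(b)
	p.cur += int64(n)
	fmt.Printf("%s: %s %d/%d bytes\n", p.id, p.action, p.cur, p.total)
	return n, err
}

func main() {
	src := strings.NewReader("image blob bytes")
	pr := &progressReader{r: src, total: int64(src.Len()), id: "abc123def456", action: "Extracting"}
	io.Copy(io.Discard, pr)
}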
Example #11
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if fs, ok := pd.layer.(distribution.Describable); ok {
		if d := fs.Descriptor(); len(d.URLs) > 0 {
			progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
			return d, nil
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer)

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		// check for blob existence in the target repository if we have a mapping with it
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, false, 1, v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata)
	for _, mountCandidate := range candidates {
		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository)
		createOpts := []distribution.BlobCreateOption{}

		if len(mountCandidate.SourceRepository) > 0 {
			namedRef, err := reference.WithName(mountCandidate.SourceRepository)
			if err != nil {
				logrus.Errorf("failed to parse source repository reference %v: %v", mountCandidate.SourceRepository, err)
				pd.v2MetadataService.Remove(mountCandidate)
				continue
			}

			// TODO (brianbland): We need to construct a reference where the Name is
			// only the full remote name, so clean this up when distribution has a
			// richer reference package
			remoteRef, err := distreference.WithName(namedRef.RemoteName())
			if err != nil {
				logrus.Errorf("failed to make remote reference out of %q: %v", namedRef.RemoteName(), err)
				continue
			}

			canonicalRef, err := distreference.WithDigest(distreference.TrimNamed(remoteRef), mountCandidate.Digest)
			if err != nil {
				logrus.Errorf("failed to make canonical reference: %v", err)
				continue
			}

			createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
		}

		// send the layer
		lu, err := bs.Create(ctx, createOpts...)
		switch err := err.(type) {
		case nil:
			// noop
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.confirmedV2 = true
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
				Digest:           err.Descriptor.Digest,
				SourceRepository: pd.repoInfo.FullName(),
			}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		default:
			logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err)
		}

		if len(mountCandidate.SourceRepository) > 0 &&
			(metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) ||
				len(mountCandidate.HMAC) == 0) {
			cause := "blob mount failure"
			if err != nil {
				cause = fmt.Sprintf("an error: %v", err.Error())
			}
			logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause)
			pd.v2MetadataService.Remove(mountCandidate)
		}

		if lu != nil {
			// cancel previous upload
			cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload)
			layerUpload = lu
		}
	}

	if maxExistenceChecks-len(pd.checkedDigests) > 0 {
		// do additional layer existence checks with other known digests if any
		descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata)
		if exists || err != nil {
			return descriptor, err
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)
	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()

	// upload the blob
	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
	if err != nil {
		return desc, err
	}

	return desc, nil
}
Example #12
// DownloadLayers ensures layers end up in the portlayer's image store
// It handles existing and simultaneous layer download de-duplication
// This code utilizes Docker's xfer package: https://github.com/docker/docker/tree/v1.11.2/distribution/xfer
func (ldm *LayerDownloader) DownloadLayers(ctx context.Context, ic *ImageC) error {
	defer trace.End(trace.Begin(""))

	var (
		topDownload    *downloadTransfer
		watcher        *xfer.Watcher
		d              xfer.Transfer
		layerCount     = 0
		sf             = streamformatter.NewJSONStreamFormatter()
		progressOutput = &serialProgressOutput{
			c:   make(chan prog, 100),
			out: sf.NewProgressOutput(ic.Outstream, false),
		}
	)

	go progressOutput.run()
	defer progressOutput.stop()

	// lock here so that we get all layers in flight before another client comes along
	ldm.m.Lock()

	// Grab the imageLayers
	layers := ic.ImageLayers

	// iterate backwards through layers to download
	for i := len(layers) - 1; i >= 0; i-- {

		layer := layers[i]
		id := layer.ID

		layerConfig, err := LayerCache().Get(id)
		if err != nil {

			switch err := err.(type) {
			case LayerNotFoundError:

				layerCount++

				// layer does not already exist in store and is not currently in flight, so download it
				progress.Update(progressOutput, layer.String(), "Pulling fs layer")

				xferFunc := ldm.makeDownloadFunc(layer, ic, topDownload, layers)
				d, watcher = ldm.tm.Transfer(id, xferFunc, progressOutput)
				topDownload = d.(*downloadTransfer)

				defer topDownload.Transfer.Release(watcher)

				ldm.registerDownload(topDownload)
				layer.Downloading = true
				LayerCache().Add(layer)

				continue
			default:
				return err
			}
		}

		if layerConfig.Downloading {

			layerCount++

			if existingDownload, ok := ldm.downloadsByID[id]; ok {

				xferFunc := ldm.makeDownloadFuncFromDownload(layer, existingDownload, topDownload, layers)
				d, watcher = ldm.tm.Transfer(id, xferFunc, progressOutput)
				topDownload = d.(*downloadTransfer)

				defer topDownload.Transfer.Release(watcher)

			}
			continue
		}

		progress.Update(progressOutput, layer.String(), "Already exists")
	}

	ldm.m.Unlock()

	// each layer download will block until the parent download finishes,
	// so this will block until the child-most layer, and thus all layers, have finished downloading
	if layerCount > 0 {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-topDownload.Done():
		default:
			<-topDownload.Done()
		}
		err := topDownload.result()
		if err != nil {
			return err
		}
	} else {
		if err := updateRepositoryCache(ic); err != nil {
			return err
		}
	}

	progress.Message(progressOutput, "", "Digest: "+ic.ImageManifest.Digest)

	if layerCount > 0 {
		progress.Message(progressOutput, "", "Status: Downloaded newer image for "+ic.Image+":"+ic.Tag)
	} else {
		progress.Message(progressOutput, "", "Status: Image is up to date for "+ic.Image+":"+ic.Tag)
	}

	return nil
}
Example #13
func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
	if err != nil {
		return err
	}
	if len(history) < 1 {
		return fmt.Errorf("empty history for image %s", v1ID)
	}
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")

	var (
		descriptors []xfer.DownloadDescriptor
		newHistory  []image.History
		imgJSON     []byte
		imgSize     int64
	)

	// Iterate over layers, in order from bottom-most to top-most. Download
	// config for all layers and create descriptors.
	for i := len(history) - 1; i >= 0; i-- {
		v1LayerID := history[i]
		imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint)
		if err != nil {
			return err
		}

		// Create a new-style config from the legacy configs
		h, err := v1.HistoryFromConfig(imgJSON, false)
		if err != nil {
			return err
		}
		newHistory = append(newHistory, h)

		layerDescriptor := &v1LayerDescriptor{
			v1LayerID:        v1LayerID,
			indexName:        p.repoInfo.Index.Name,
			endpoint:         endpoint,
			v1IDService:      p.v1IDService,
			layersDownloaded: layersDownloaded,
			layerSize:        imgSize,
			session:          p.session,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	rootFS := image.NewRootFS()
	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory)
	if err != nil {
		return err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return err
	}

	if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
		return err
	}

	return nil
}
Example #14
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	diffID := pd.DiffID()

	pd.pushState.Lock()
	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return nil
	}
	pd.pushState.Unlock()

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var mountFrom metadata.V2Metadata

	// Attempt to find another repository in the same registry to mount the layer from to avoid an unnecessary upload
	for _, metadata := range v2Metadata {
		sourceRepo, err := reference.ParseNamed(metadata.SourceRepository)
		if err != nil {
			continue
		}
		if pd.repoInfo.Hostname() == sourceRepo.Hostname() {
			logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, metadata.Digest, sourceRepo.FullName())
			mountFrom = metadata
			break
		}
	}

	var createOpts []distribution.BlobCreateOption

	if mountFrom.SourceRepository != "" {
		namedRef, err := reference.WithName(mountFrom.SourceRepository)
		if err != nil {
			return err
		}

		// TODO (brianbland): We need to construct a reference where the Name is
		// only the full remote name, so clean this up when distribution has a
		// richer reference package
		remoteRef, err := distreference.WithName(namedRef.RemoteName())
		if err != nil {
			return err
		}

		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
		if err != nil {
			return err
		}

		createOpts = append(createOpts, client.WithMountFrom(canonicalRef))
	}

	// Send the layer
	layerUpload, err := bs.Create(ctx, createOpts...)
	switch err := err.(type) {
	case distribution.ErrBlobMounted:
		progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

		err.Descriptor.MediaType = schema2.MediaTypeLayer

		pd.pushState.Lock()
		pd.pushState.confirmedV2 = true
		pd.pushState.remoteLayers[diffID] = err.Descriptor
		pd.pushState.Unlock()

		// Cache mapping from this layer's DiffID to the blobsum
		if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
			return xfer.DoNotRetry{Err: err}
		}

		return nil
	}
	if mountFrom.SourceRepository != "" {
		// unable to mount layer from this repository, so this source mapping is no longer valid
		logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
		pd.v2MetadataService.Remove(mountFrom)
	}

	if err != nil {
		return retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	compressedReader, compressionDone := compress(reader)
	defer func() {
		reader.Close()
		<-compressionDone
	}()

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
		return xfer.DoNotRetry{Err: err}
	}

	pd.pushState.Lock()

	// If Commit succeeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Unlock()

	return nil
}
Example #15
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	var (
		err    error
		offset int64
	)

	if ld.tmpFile == nil {
		ld.tmpFile, err = createDownloadFile()
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	} else {
		offset, err = ld.tmpFile.Seek(0, os.SEEK_END)
		if err != nil {
			logrus.Debugf("error seeking to end of download file: %v", err)
			offset = 0

			ld.tmpFile.Close()
			if err := os.Remove(ld.tmpFile.Name()); err != nil {
				logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name())
			}
			ld.tmpFile, err = createDownloadFile()
			if err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		} else if offset != 0 {
			logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset)
		}
	}

	tmpFile := ld.tmpFile

	layerDownload, err := ld.open(ctx)
	if err != nil {
		logrus.Errorf("Error initiating layer download: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	if offset != 0 {
		_, err := layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
	}
	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		if size != 0 && offset > size {
			logrus.Debug("Partial download is larger than full blob. Starting over")
			offset = 0
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
		}

		// Restore the seek offset either at the beginning of the
		// stream, or just after the last byte we have from previous
		// attempts.
		_, err = layerDownload.Seek(offset, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading")
	defer reader.Close()

	if ld.verifier == nil {
		ld.verifier, err = digest.NewDigestVerifier(ld.digest)
		if err != nil {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier))
	if err != nil {
		if err == transport.ErrWrongCodeForByteRange {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}
			return nil, 0, err
		}
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !ld.verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)

		// Allow a retry if this digest verification error happened
		// after a resumed download.
		if offset != 0 {
			if err := ld.truncateDownloadFile(); err != nil {
				return nil, 0, xfer.DoNotRetry{Err: err}
			}

			return nil, 0, err
		}
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	_, err = tmpFile.Seek(0, os.SEEK_SET)
	if err != nil {
		tmpFile.Close()
		if err := os.Remove(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		ld.tmpFile = nil
		ld.verifier = nil
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	// hand off the temporary file to the download manager, so it will only
	// be closed once
	ld.tmpFile = nil

	return ioutils.NewReadCloserWrapper(tmpFile, func() error {
		tmpFile.Close()
		err := os.RemoveAll(tmpFile.Name())
		if err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}
		return err
	}), size, nil
}
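Resuming works because the blob reader supports Seek, which the registry client translates into an HTTP Range request. A hedged sketch of that HTTP-level detail; the URL is illustrative and the real code goes through the distribution client's transport rather than raw net/http:

package main

import (
	"fmt"
	"net/http"
)

// resumeRequest builds a GET that asks the server to start at offset,
// the wire-level analogue of Seek(offset, os.SEEK_SET) above.
func resumeRequest(blobURL string, offset int64) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, blobURL, nil)
	if err != nil {
		return nil, err
	}
	if offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	}
	return req, nil
}

func main() {
	req, _ := resumeRequest("https://registry.example.com/v2/library/busybox/blobs/sha256:abc", 4096)
	fmt.Println(req.Header.Get("Range")) // bytes=4096-
	// A server honoring the range replies 206 Partial Content; anything
	// else (e.g. transport.ErrWrongCodeForByteRange above) means the
	// partial file must be truncated and the download restarted.
}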
Example #16
// makeDownloadFunc returns a func used by xfer.TransferManager to download a layer
func (ldm *LayerDownloader) makeDownloadFunc(layer *ImageWithMeta, ic *ImageC, parentDownload *downloadTransfer, layers []*ImageWithMeta) xfer.DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) xfer.Transfer {

		d := &downloadTransfer{
			Transfer: xfer.NewTransfer(),
			layer:    layer,
		}

		go func() {

			defer func() {
				close(progressChan)

				// remove layer from cache if there was an error attempting to download
				if d.err != nil {
					LayerCache().Remove(layer.ID)
				}

			}()

			progressOutput := progress.ChanOutput(progressChan)

			// wait for TransferManager to give the go-ahead
			select {
			case <-start:
			default:
				progress.Update(progressOutput, layer.String(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// bail if parent download failed or was cancelled
				select {
				case <-parentDownload.Done():
					if err := parentDownload.result(); err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			// fetch blob
			diffID, err := FetchImageBlob(d.Transfer.Context(), ic.Options, layer, progressOutput)
			if err != nil {
				d.err = fmt.Errorf("%s/%s returned %s", ic.Image, layer.ID, err)
				return
			}

			layer.DiffID = diffID

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer download cancelled")
					return
				default:
					<-parentDownload.Done() // block until parent download completes
				}

				if err := parentDownload.result(); err != nil {
					d.err = err
					return
				}
			}

			// is this the leaf layer?
			imageLayer := layer.ID == layers[0].ID

			// if this is the leaf layer, we are done and can now create the image config
			if imageLayer {
				imageConfig, err := ic.CreateImageConfig(layers)
				if err != nil {
					d.err = err
					return
				}
				// cache and persist the image
				cache.ImageCache().Add(&imageConfig)
				cache.ImageCache().Save()

				// place calculated ImageID in struct
				ic.ImageID = imageConfig.ImageID

				if err = updateRepositoryCache(ic); err != nil {
					d.err = err
					return
				}

			}

			ldm.m.Lock()
			defer ldm.m.Unlock()

			// Write blob to the storage layer
			if err := ic.WriteImageBlob(layer, progressOutput, imageLayer); err != nil {
				d.err = err
				return
			}

			// mark the layer as finished downloading
			LayerCache().Commit(layer)

			ldm.unregisterDownload(layer)

		}()

		return d
	}
}
Example #17
File: docker.go Project: vmware/vic
// FetchImageBlob fetches the image blob
func FetchImageBlob(ctx context.Context, options Options, image *ImageWithMeta, progressOutput progress.Output) (string, error) {
	defer trace.End(trace.Begin(options.Image + "/" + image.Layer.BlobSum))

	id := image.ID
	layer := image.Layer.BlobSum
	meta := image.Meta
	diffID := ""

	url, err := url.Parse(options.Registry)
	if err != nil {
		return diffID, err
	}
	url.Path = path.Join(url.Path, options.Image, "blobs", layer)

	log.Debugf("URL: %s", url)

	fetcher := urlfetcher.NewURLFetcher(urlfetcher.Options{
		Timeout:            options.Timeout,
		Username:           options.Username,
		Password:           options.Password,
		Token:              options.Token,
		InsecureSkipVerify: options.InsecureSkipVerify,
	})

	// bound the fetch by the configured timeout
	ctx, cancel := context.WithTimeout(ctx, options.Timeout)
	defer cancel()

	imageFileName, err := fetcher.Fetch(ctx, url, true, progressOutput, image.String())
	if err != nil {
		return diffID, err
	}

	// Cleanup function for the error case
	defer func() {
		if err != nil {
			os.Remove(imageFileName)
		}
	}()

	// Open the file so that we can use it as a io.Reader for sha256 calculation
	imageFile, err := os.Open(string(imageFileName))
	if err != nil {
		return diffID, err
	}
	defer imageFile.Close()

	// blobSum is the sha of the compressed layer
	blobSum := sha256.New()

	// diffIDSum is the sha of the uncompressed layer
	diffIDSum := sha256.New()

	// blobTr is an io.TeeReader that writes bytes to blobSum that it reads from imageFile
	// see https://golang.org/pkg/io/#TeeReader
	blobTr := io.TeeReader(imageFile, blobSum)

	progress.Update(progressOutput, image.String(), "Verifying Checksum")
	decompressedTar, err := archive.DecompressStream(blobTr)
	if err != nil {
		return diffID, err
	}

	// Copy bytes from decompressed layer into diffIDSum to calculate diffID
	_, cerr := io.Copy(diffIDSum, decompressedTar)
	if cerr != nil {
		return diffID, cerr
	}

	bs := fmt.Sprintf("sha256:%x", blobSum.Sum(nil))
	if bs != layer {
		return diffID, fmt.Errorf("Failed to validate layer checksum. Expected %s got %s", layer, bs)
	}

	diffID = fmt.Sprintf("sha256:%x", diffIDSum.Sum(nil))

	// this isn't an empty layer, so we need to calculate the size
	if diffID != string(DigestSHA256EmptyTar) {
		var layerSize int64

		// seek to the beginning of the file
		imageFile.Seek(0, 0)

		// recreate the decompressed tar Reader
		decompressedTar, err := archive.DecompressStream(imageFile)
		if err != nil {
			return "", err
		}

		// get a tar reader for access to the files in the archive
		tr := tar.NewReader(decompressedTar)

		// iterate through tar headers to get file sizes
		for {
			tarHeader, err := tr.Next()
			if err == io.EOF {
				break
			}
			if err != nil {
				return "", err
			}
			layerSize += tarHeader.Size
		}

		image.Size = layerSize
	}

	log.Infof("diffID for layer %s: %s", id, diffID)

	// Ensure the parent directory exists
	destination := path.Join(DestinationDirectory(options), id)
	err = os.MkdirAll(destination, 0755) /* #nosec */
	if err != nil {
		return diffID, err
	}

	// Move(rename) the temporary file to its final destination
	err = os.Rename(string(imageFileName), path.Join(destination, id+".tar"))
	if err != nil {
		return diffID, err
	}

	// Dump the history next to it
	err = ioutil.WriteFile(path.Join(destination, id+".json"), []byte(meta), 0644)
	if err != nil {
		return diffID, err
	}

	progress.Update(progressOutput, image.String(), "Download complete")

	return diffID, nil
}
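Both digests come out of one pass over the file: the TeeReader feeds every compressed byte into the blobsum hash while the decompressed stream feeds the diffID hash. A condensed runnable version of that plumbing, assuming a gzip-compressed layer:

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

// hashBlob returns the blobsum (sha256 of the compressed bytes) and the
// diffID (sha256 of the decompressed contents) from a single read.
func hashBlob(compressedBlob io.Reader) (blobSum, diffID string, err error) {
	blobHash := sha256.New()
	diffHash := sha256.New()

	// every byte gzip consumes is also written into blobHash...
	gz, err := gzip.NewReader(io.TeeReader(compressedBlob, blobHash))
	if err != nil {
		return "", "", err
	}
	// ...while the decompressed stream is copied into diffHash
	if _, err := io.Copy(diffHash, gz); err != nil {
		return "", "", err
	}
	return fmt.Sprintf("sha256:%x", blobHash.Sum(nil)),
		fmt.Sprintf("sha256:%x", diffHash.Sum(nil)), nil
}

func main() {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	gz.Write([]byte("fake layer tar"))
	gz.Close()

	blobSum, diffID, _ := hashBlob(&buf)
	fmt.Println(blobSum)
	fmt.Println(diffID)
}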
Example #18
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) {
	if fs, ok := pd.layer.(distribution.Describable); ok {
		if d := fs.Descriptor(); len(d.URLs) > 0 {
			progress.Update(progressOutput, pd.ID(), "Skipped foreign layer")
			return d, nil
		}
	}

	diffID := pd.DiffID()

	pd.pushState.Lock()
	if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return descriptor, nil
	}
	pd.pushState.Unlock()

	// Do we have any metadata associated with this layer's DiffID?
	v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID)
	if err == nil {
		descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return distribution.Descriptor{}, retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return descriptor, nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	var layerUpload distribution.BlobWriter
	mountAttemptsRemaining := 3

	// Attempt to find another repository in the same registry to mount the layer
	// from to avoid an unnecessary upload.
	// Note: metadata is stored from oldest to newest, so we iterate through this
	// slice in reverse to maximize our chances of the blob still existing in the
	// remote repository.
	for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- {
		mountFrom := v2Metadata[i]

		sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository)
		if err != nil {
			continue
		}
		if pd.repoInfo.Hostname() != sourceRepo.Hostname() {
			// don't mount blobs from another registry
			continue
		}

		namedRef, err := reference.WithName(mountFrom.SourceRepository)
		if err != nil {
			continue
		}

		// TODO (brianbland): We need to construct a reference where the Name is
		// only the full remote name, so clean this up when distribution has a
		// richer reference package
		remoteRef, err := distreference.WithName(namedRef.RemoteName())
		if err != nil {
			continue
		}

		canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest)
		if err != nil {
			continue
		}

		logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName())

		layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef))
		switch err := err.(type) {
		case distribution.ErrBlobMounted:
			progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name())

			err.Descriptor.MediaType = schema2.MediaTypeLayer

			pd.pushState.Lock()
			pd.pushState.confirmedV2 = true
			pd.pushState.remoteLayers[diffID] = err.Descriptor
			pd.pushState.Unlock()

			// Cache mapping from this layer's DiffID to the blobsum
			if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
				return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
			}
			return err.Descriptor, nil
		case nil:
			// blob upload session created successfully, so begin the upload
			mountAttemptsRemaining = 0
		default:
			// unable to mount layer from this repository, so this source mapping is no longer valid
			logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository)
			pd.v2MetadataService.Remove(mountFrom)
			mountAttemptsRemaining--
		}
	}

	if layerUpload == nil {
		layerUpload, err = bs.Create(ctx)
		if err != nil {
			return distribution.Descriptor{}, retryOnError(err)
		}
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	compressedReader, compressionDone := compress(reader)
	defer func() {
		reader.Close()
		<-compressionDone
	}()

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return distribution.Descriptor{}, retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil {
		return distribution.Descriptor{}, xfer.DoNotRetry{Err: err}
	}

	pd.pushState.Lock()

	// If Commit succeeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	descriptor := distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}
	pd.pushState.remoteLayers[diffID] = descriptor

	pd.pushState.Unlock()

	return descriptor, nil
}
Example #19
// makeDownloadFunc returns a function that performs the layer download and
// registration. If parentDownload is non-nil, it waits for that download to
// complete before the registration step, and registers the downloaded data
// on top of parentDownload's resulting layer. Otherwise, it registers the
// layer on top of the ChainID given by parentLayer.
func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		d := &downloadTransfer{
			Transfer:   NewTransfer(),
			layerStore: ldm.layerStore,
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			if parentDownload != nil {
				// Did the parent download already fail or get
				// cancelled?
				select {
				case <-parentDownload.Done():
					_, err := parentDownload.result()
					if err != nil {
						d.err = err
						return
					}
				default:
				}
			}

			var (
				downloadReader io.ReadCloser
				size           int64
				err            error
				retries        int
			)

			defer descriptor.Close()

			for {
				downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput)
				if err == nil {
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-d.Transfer.Context().Done():
					d.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts {
					logrus.Errorf("Download failed: %v", err)
					d.err = err
					return
				}

				logrus.Errorf("Download failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-d.Transfer.Context().Done():
						ticker.Stop()
						d.err = errors.New("download cancelled during retry delay")
						return
					}

				}
			}

			close(inactive)

			if parentDownload != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
					downloadReader.Close()
					return
				case <-parentDownload.Done():
				}

				l, err := parentDownload.result()
				if err != nil {
					d.err = err
					downloadReader.Close()
					return
				}
				parentLayer = l.ChainID()
			}

			reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting")
			defer reader.Close()

			inflatedLayerData, err := archive.DecompressStream(reader)
			if err != nil {
				d.err = fmt.Errorf("could not get decompression stream: %v", err)
				return
			}

			var src distribution.Descriptor
			if fs, ok := descriptor.(distribution.Describable); ok {
				src = fs.Descriptor()
			}
			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src)
			} else {
				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer)
			}
			if err != nil {
				select {
				case <-d.Transfer.Context().Done():
					d.err = errors.New("layer registration cancelled")
				default:
					d.err = fmt.Errorf("failed to register layer: %v", err)
				}
				return
			}

			progress.Update(progressOutput, descriptor.ID(), "Pull complete")
			withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered)
			if hasRegistered {
				withRegistered.Registered(d.layer.DiffID())
			}

			// Doesn't actually need to be its own goroutine, but
			// done like this so the wait for release doesn't block
			// the download function.
			go func() {
				<-d.Transfer.Released()
				if d.layer != nil {
					layer.ReleaseAndLog(d.layerStore, d.layer)
				}
			}()
		}()

		return d
	}
}
Example #20
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	blobs := ld.repo.Blobs(ctx)

	layerDownload, err := blobs.Open(ctx, ld.digest)
	if err != nil {
		logrus.Debugf("Error statting layer: %v", err)
		if err == distribution.ErrBlobUnknown {
			return nil, 0, xfer.DoNotRetry{Err: err}
		}
		return nil, 0, retryOnError(err)
	}

	size, err := layerDownload.Seek(0, os.SEEK_END)
	if err != nil {
		// Seek failed, perhaps because there was no Content-Length
		// header. This shouldn't fail the download, because we can
		// still continue without a progress bar.
		size = 0
	} else {
		// Restore the seek offset at the beginning of the stream.
		_, err = layerDownload.Seek(0, os.SEEK_SET)
		if err != nil {
			return nil, 0, err
		}
	}

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size, ld.ID(), "Downloading")
	defer reader.Close()

	verifier, err := digest.NewDigestVerifier(ld.digest)
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	tmpFile, err := ioutil.TempFile("", "GetImageBlob")
	if err != nil {
		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	_, err = io.Copy(tmpFile, io.TeeReader(reader, verifier))
	if err != nil {
		return nil, 0, retryOnError(err)
	}

	progress.Update(progressOutput, ld.ID(), "Verifying Checksum")

	if !verifier.Verified() {
		err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
		logrus.Error(err)
		tmpFile.Close()
		if err := os.RemoveAll(tmpFile.Name()); err != nil {
			logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name())
		}

		return nil, 0, xfer.DoNotRetry{Err: err}
	}

	progress.Update(progressOutput, ld.ID(), "Download complete")

	logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name())

	tmpFile.Seek(0, 0)
	return ioutils.NewReadCloserWrapper(tmpFile, tmpFileCloser(tmpFile)), size, nil
}
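digest.NewDigestVerifier yields a hash-backed writer whose Verified method compares the computed digest with the expected one, which is why the download is teed through it. The essence of that check in plain crypto/sha256 terms:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

// verify hashes everything read from r and reports whether it matches
// the expected "sha256:<hex>" digest.
func verify(r io.Reader, expected string) (bool, error) {
	h := sha256.New()
	if _, err := io.Copy(h, r); err != nil {
		return false, err
	}
	return fmt.Sprintf("sha256:%x", h.Sum(nil)) == expected, nil
}

func main() {
	blob := "layer bytes"
	want := fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(blob)))
	ok, _ := verify(strings.NewReader(blob), want)
	fmt.Println(ok) // true
}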
Example #21
// ImagesToDownload creates a slice of ImageWithMeta for the images that need to be downloaded
func ImagesToDownload(manifest *Manifest, storeName string) ([]*ImageWithMeta, *ImageWithMeta, error) {
	images := make([]*ImageWithMeta, len(manifest.FSLayers))

	v1 := docker.V1Image{}
	// iterate from parent to children
	for i := len(manifest.History) - 1; i >= 0; i-- {
		history := manifest.History[i]
		layer := manifest.FSLayers[i]

		// unmarshal V1Compatibility to get the image ID
		if err := json.Unmarshal([]byte(history.V1Compatibility), &v1); err != nil {
			return nil, nil, fmt.Errorf("Failed to unmarshal image history: %s", err)
		}

		// if parent is empty set it to scratch
		parent := "scratch"
		if v1.Parent != "" {
			parent = v1.Parent
		}

		// add image to ImageWithMeta list
		images[i] = &ImageWithMeta{
			Image: &models.Image{
				ID:     v1.ID,
				Parent: &parent,
				Store:  storeName,
			},
			meta:   history.V1Compatibility,
			layer:  layer,
			diffID: "",
		}
		log.Debugf("ImagesToDownload Manifest image: %#v", images[i])
	}

	// return early if -standalone set
	if options.standalone {
		return images, nil, nil
	}

	// Create the image store just in case
	err := CreateImageStore(storeName)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to create image store: %s", err)
	}

	// Get the list of known images from the storage layer
	existingImages, err := ListImages(storeName, images)
	if err != nil {
		return nil, nil, fmt.Errorf("Failed to obtain list of images: %s", err)
	}

	for i := range existingImages {
		log.Debugf("Existing image: %#v", existingImages[i])
	}

	// grab the imageLayer for use in later evaluation of metadata update
	imageLayer := images[0]

	// iterate from parent to children
	// so that we can delete from the slice
	// while iterating over it
	for i := len(images) - 1; i >= 0; i-- {
		ID := images[i].ID
		// Check whether storage layer knows this image ID
		if _, ok := existingImages[ID]; ok {
			log.Debugf("%s already exists", ID)
			// update the progress before deleting it from the slice
			progress.Update(po, images[i].String(), "Already exists")

			// delete existing image from images
			images = append(images[:i], images[i+1:]...)
		}
	}

	return images, imageLayer, nil
}
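Deleting matched entries inside the loop is safe only because the iteration runs backwards: removing index i never shifts the indices still to be visited. The pattern in isolation:

package main

import "fmt"

func main() {
	images := []string{"base", "middle", "leaf"}
	existing := map[string]bool{"middle": true}

	// iterate from the end so append(images[:i], images[i+1:]...)
	// never disturbs an index we have yet to visit
	for i := len(images) - 1; i >= 0; i-- {
		if existing[images[i]] {
			images = append(images[:i], images[i+1:]...)
		}
	}
	fmt.Println(images) // [base leaf]
}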
Example #22
func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {
	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
		u := &uploadTransfer{
			Transfer: NewTransfer(),
		}

		go func() {
			defer func() {
				close(progressChan)
			}()

			progressOutput := progress.ChanOutput(progressChan)

			select {
			case <-start:
			default:
				progress.Update(progressOutput, descriptor.ID(), "Waiting")
				<-start
			}

			retries := 0
			for {
				remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
				if err == nil {
					u.remoteDescriptor = remoteDescriptor
					break
				}

				// If an error was returned because the context
				// was cancelled, we shouldn't retry.
				select {
				case <-u.Transfer.Context().Done():
					u.err = err
					return
				default:
				}

				retries++
				if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts {
					logrus.Errorf("Upload failed: %v", err)
					u.err = err
					return
				}

				logrus.Errorf("Upload failed, retrying: %v", err)
				delay := retries * 5
				ticker := time.NewTicker(time.Second)

			selectLoop:
				for {
					progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1])
					select {
					case <-ticker.C:
						delay--
						if delay == 0 {
							ticker.Stop()
							break selectLoop
						}
					case <-u.Transfer.Context().Done():
						ticker.Stop()
						u.err = errors.New("upload cancelled during retry delay")
						return
					}
				}
			}
		}()

		return u
	}
}
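The retry delay is rendered with a ticker that counts down one second at a time, reprinting the message so the progress line stays current. A trimmed runnable version of that countdown, without the cancellation branch:

package main

import (
	"fmt"
	"time"
)

// countdown prints a retry message every second until delay reaches
// zero, then returns so the caller can attempt the upload again.
func countdown(delay int) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for delay > 0 {
		// the map trick pluralizes: "" for 1 second, "s" otherwise
		fmt.Printf("Retrying in %d second%s\n", delay, map[bool]string{true: "s"}[delay != 1])
		<-ticker.C
		delay--
	}
}

func main() {
	countdown(2)
}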
Example #23
// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata"
// slice. If it finds one that the registry knows about, it returns the known digest and "true". If
// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository
// (not just the target one).
func (pd *v2PushDescriptor) layerAlreadyExists(
	ctx context.Context,
	progressOutput progress.Output,
	diffID layer.DiffID,
	checkOtherRepositories bool,
	maxExistenceCheckAttempts int,
	v2Metadata []metadata.V2Metadata,
) (desc distribution.Descriptor, exists bool, err error) {
	// filter the metadata
	candidates := []metadata.V2Metadata{}
	for _, meta := range v2Metadata {
		if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.FullName() {
			continue
		}
		candidates = append(candidates, meta)
	}
	// sort the candidates by similarity
	sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates)

	digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata)
	// an array of unique blob digests ordered from the best mount candidates to worst
	layerDigests := []digest.Digest{}
	for i := 0; i < len(candidates); i++ {
		if len(layerDigests) >= maxExistenceCheckAttempts {
			break
		}
		meta := &candidates[i]
		if _, exists := digestToMetadata[meta.Digest]; exists {
			// keep reference just to the first mapping (the best mount candidate)
			continue
		}
		if _, exists := pd.checkedDigests[meta.Digest]; exists {
			// existence of this digest has already been tested
			continue
		}
		digestToMetadata[meta.Digest] = meta
		layerDigests = append(layerDigests, meta.Digest)
	}

attempts:
	for _, dgst := range layerDigests {
		meta := digestToMetadata[dgst]
		logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName())
		desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst)
		pd.checkedDigests[meta.Digest] = struct{}{}
		switch err {
		case nil:
			if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.FullName() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) {
				// cache mapping from this layer's DiffID to the blobsum
				if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{
					Digest:           desc.Digest,
					SourceRepository: pd.repoInfo.FullName(),
				}); err != nil {
					return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err}
				}
			}
			desc.MediaType = schema2.MediaTypeLayer
			exists = true
			break attempts
		case distribution.ErrBlobUnknown:
			if meta.SourceRepository == pd.repoInfo.FullName() {
				// remove the mapping to the target repository
				pd.v2MetadataService.Remove(*meta)
			}
		default:
			logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.FullName())
		}
	}

	if exists {
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		pd.pushState.Lock()
		pd.pushState.remoteLayers[diffID] = desc
		pd.pushState.Unlock()
	}

	return desc, exists, nil
}
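TagAndAdd and CheckV2MetadataHMAC guard the digest-to-repository cache with an HMAC under pd.hmacKey, so a tampered or foreign entry fails verification instead of being trusted as a mount candidate. A sketch of the idea with crypto/hmac; the message layout here is illustrative, not the library's actual format:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// tag computes an HMAC over a digest/repository pair.
func tag(key []byte, digest, sourceRepo string) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(digest + " " + sourceRepo))
	return hex.EncodeToString(mac.Sum(nil))
}

// check verifies the pair against a previously computed tag.
func check(key []byte, digest, sourceRepo, tagHex string) bool {
	want, err := hex.DecodeString(tagHex)
	if err != nil {
		return false
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(digest + " " + sourceRepo))
	return hmac.Equal(mac.Sum(nil), want)
}

func main() {
	key := []byte("node-local-secret")
	t := tag(key, "sha256:abc", "library/busybox")
	fmt.Println(check(key, "sha256:abc", "library/busybox", t)) // true
	fmt.Println(check(key, "sha256:abc", "evil/repo", t))       // false
}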
Example #24
// Download is a blocking function which ensures the requested layers are
// present in the layer store. It uses the string returned by the Key method to
// deduplicate downloads. If a given layer is not already known to be present in
// the layer store, and the key is not used by an in-progress download, the
// Download method is called to get the layer tar data. Layers are then
// registered in the appropriate order.  The caller must call the returned
// release function once it is done with the returned RootFS object.
func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	var (
		topLayer       layer.Layer
		topDownload    *downloadTransfer
		watcher        *Watcher
		missingLayer   bool
		transferKey    = ""
		downloadsByKey = make(map[string]*downloadTransfer)
	)

	rootFS := initialRootFS
	for _, descriptor := range layers {
		key := descriptor.Key()
		transferKey += key

		if !missingLayer {
			missingLayer = true
			diffID, err := descriptor.DiffID()
			if err == nil {
				getRootFS := rootFS
				getRootFS.Append(diffID)
				l, err := ldm.layerStore.Get(getRootFS.ChainID())
				if err == nil {
					// Layer already exists.
					logrus.Debugf("Layer already exists: %s", descriptor.ID())
					progress.Update(progressOutput, descriptor.ID(), "Already exists")
					if topLayer != nil {
						layer.ReleaseAndLog(ldm.layerStore, topLayer)
					}
					topLayer = l
					missingLayer = false
					rootFS.Append(diffID)
					continue
				}
			}
		}

		// Does this layer have the same data as a previous layer in
		// the stack? If so, avoid downloading it more than once.
		var topDownloadUncasted Transfer
		if existingDownload, ok := downloadsByKey[key]; ok {
			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload)
			defer topDownload.Transfer.Release(watcher)
			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
			topDownload = topDownloadUncasted.(*downloadTransfer)
			continue
		}

		// Layer is not known to exist - download and register it.
		progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer")

		var xferFunc DoFunc
		if topDownload != nil {
			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload)
			defer topDownload.Transfer.Release(watcher)
		} else {
			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil)
		}
		topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
		topDownload = topDownloadUncasted.(*downloadTransfer)
		downloadsByKey[key] = topDownload
	}

	if topDownload == nil {
		return rootFS, func() {
			if topLayer != nil {
				layer.ReleaseAndLog(ldm.layerStore, topLayer)
			}
		}, nil
	}

	// Won't be using the list built up so far - will generate it
	// from downloaded layers instead.
	rootFS.DiffIDs = []layer.DiffID{}

	defer func() {
		if topLayer != nil {
			layer.ReleaseAndLog(ldm.layerStore, topLayer)
		}
	}()

	select {
	case <-ctx.Done():
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, ctx.Err()
	case <-topDownload.Done():
		break
	}

	l, err := topDownload.result()
	if err != nil {
		topDownload.Transfer.Release(watcher)
		return rootFS, func() {}, err
	}

	// Must do this exactly len(layers) times, so we don't include the
	// base layer on Windows.
	for range layers {
		if l == nil {
			topDownload.Transfer.Release(watcher)
			return rootFS, func() {}, errors.New("internal error: too few parent layers")
		}
		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
		l = l.Parent()
	}
	return rootFS, func() { topDownload.Transfer.Release(watcher) }, err
}
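Waiting only on topDownload is sound because every layer's registration blocks on its parent, so the child-most transfer completes last. A toy model of that chaining with channels:

package main

import (
	"fmt"
	"sync"
)

func main() {
	layers := []string{"base", "middle", "top"}
	var parent chan struct{} // nil for the bottom layer

	var wg sync.WaitGroup
	for _, name := range layers {
		done := make(chan struct{})
		wg.Add(1)
		go func(name string, parent, done chan struct{}) {
			defer wg.Done()
			if parent != nil {
				<-parent // block until the layer below is registered
			}
			fmt.Println("registered", name)
			close(done)
		}(name, parent, done)
		parent = done
	}
	wg.Wait() // completion of "top" implies the whole chain finished
}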
Example #25
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	diffID := pd.DiffID()

	logrus.Debugf("Pushing layer: %s", diffID)

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
	if err == nil {
		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return "", retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			return dgst, nil
		}
	}

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	// Send the layer
	layerUpload, err := bs.Create(ctx)
	if err != nil {
		return "", retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return "", xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	defer reader.Close()
	compressedReader := compress(reader)

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return "", retryOnError(err)
	}

	// If Commit succeeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	*pd.confirmedV2 = true

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
		return "", xfer.DoNotRetry{Err: err}
	}

	pd.layersPushed.Lock()
	pd.layersPushed.layersPushed[pushDigest] = true
	pd.layersPushed.Unlock()

	return pushDigest, nil
}