Example #1
// writeStatus writes a status message to out. If layersDownloaded is true, the
// status message indicates that a newer image was downloaded. Otherwise, it
// indicates that the image is up to date. requestedTag is the tag the message
// will refer to.
func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) {
	if layersDownloaded {
		progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag)
	} else {
		progress.Message(out, "", "Status: Image is up to date for "+requestedTag)
	}
}
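A minimal usage sketch, assuming the same JSON stream formatter that appears in the later examples; the tag value is illustrative.

sf := streamformatter.NewJSONStreamFormatter()
out := sf.NewProgressOutput(os.Stdout, false)
// Report the outcome of a pull that fetched at least one new layer.
writeStatus("docker.io/library/alpine:latest", out, true)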
Example #2
func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error {
	progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName())

	tagged, isTagged := ref.(reference.NamedTagged)

	repoData, err := p.session.GetRepositoryData(p.repoInfo)
	if err != nil {
		if strings.Contains(err.Error(), "HTTP code: 404") {
			if isTagged {
				return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag())
			}
			return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName())
		}
		// Unexpected HTTP error
		return err
	}

	logrus.Debug("Retrieving the tag list")
	var tagsList map[string]string
	if !isTagged {
		tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo)
	} else {
		var tagID string
		tagsList = make(map[string]string)
		tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag())
		if err == registry.ErrRepoNotFound {
			return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName())
		}
		tagsList[tagged.Tag()] = tagID
	}
	if err != nil {
		logrus.Errorf("unable to get remote tags: %s", err)
		return err
	}

	for tag, id := range tagsList {
		repoData.ImgList[id] = &registry.ImgData{
			ID:       id,
			Tag:      tag,
			Checksum: "",
		}
	}

	layersDownloaded := false
	for _, imgData := range repoData.ImgList {
		if isTagged && imgData.Tag != tagged.Tag() {
			continue
		}

		err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded)
		if err != nil {
			return err
		}
	}

	writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded)
	return nil
}
Example #3
func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error {
	if _, isCanonical := ref.(reference.Canonical); isCanonical {
		// Allow fallback: the HTTPS v1 endpoint is tried before the HTTP v2 endpoint
		return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
	}

	tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name)
	if err != nil {
		return err
	}
	// Adds Docker-specific headers as well as user-specified headers (metaHeaders)
	tr := transport.NewTransport(
		// TODO(tiborvass): was ReceiveTimeout
		registry.NewTransport(tlsConfig),
		registry.DockerHeaders(dockerversion.DockerUserAgent(), p.config.MetaHeaders)...,
	)
	client := registry.HTTPClient(tr)
	v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(), p.config.MetaHeaders)
	if err != nil {
		logrus.Debugf("Could not get v1 endpoint: %v", err)
		return fallbackError{err: err}
	}
	p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint)
	if err != nil {
		// TODO(dmcgowan): Check if should fallback
		logrus.Debugf("Fallback from error: %s", err)
		return fallbackError{err: err}
	}
	if err := p.pullRepository(ctx, ref); err != nil {
		// TODO(dmcgowan): Check if should fallback
		return err
	}
	progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry.  Important: This registry version will not be supported in future versions of docker.")

	return nil
}
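Pull wraps errors in fallbackError so that the dispatching code can decide whether to retry the pull against another endpoint or protocol. A minimal sketch of the shape such a wrapper can take; the real type in docker's distribution package carries additional bookkeeping (for example, whether the v2 protocol was already confirmed).

// fallbackError marks an error as one that permits falling back to a
// different registry endpoint; callers detect it with a type assertion.
type fallbackError struct {
	err error
}

func (f fallbackError) Error() string {
	return f.err.Error()
}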
Example #4
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		// NOTE: not using TagService.Get, since it uses HEAD requests
		// against the manifests endpoint, which are not supported by
		// all registry versions.
		manifest, err = manSvc.Get(ctx, "", client.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var (
		imageID        image.ID
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagImageID == imageID {
			return false, nil
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}
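The tagged-versus-canonical branching above recurs in every puller. A standalone sketch of the same dispatch, assuming the reference package used above (ParseNamed was the parser at this vintage; later releases renamed the normalizing variant ParseNormalizedNamed).

func describeRef(s string) error {
	ref, err := reference.ParseNamed(s)
	if err != nil {
		return err
	}
	// As in pullV2Tag, a reference carrying both a tag and a digest hits the
	// first matching case.
	switch r := ref.(type) {
	case reference.NamedTagged:
		fmt.Println("pull by tag:", r.Tag())
	case reference.Canonical:
		fmt.Println("pull by digest:", r.Digest())
	default:
		fmt.Println("neither tag nor digest:", r.String())
	}
	return nil
}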
Example #5
func main() {
	// Convert any panic into a formatted error on stderr before exiting.
	defer func() {
		if r := recover(); r != nil {
			fmt.Fprint(os.Stderr, string(sf.FormatError(fmt.Errorf("%s : %s", r, debug.Stack()))))
		}
	}()

	if version.Show() {
		fmt.Fprintf(os.Stdout, "%s\n", version.String())
		return
	}

	// Enable profiling if mode is set
	switch options.profiling {
	case "cpu":
		defer profile.Start(profile.CPUProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	case "mem":
		defer profile.Start(profile.MemProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	case "block":
		defer profile.Start(profile.BlockProfile, profile.ProfilePath("."), profile.Quiet).Stop()
	default:
		// do nothing
	}

	// Register our custom Error hook
	log.AddHook(NewErrorHook(os.Stderr))

	// Enable runtime tracing if tracing is true
	if options.tracing {
		tracing, err := os.Create(time.Now().Format("2006-01-02T150405.pprof"))
		if err != nil {
			log.Fatalf("Failed to create tracing logfile: %s", err)
		}
		defer tracing.Close()

		if err := trace.Start(tracing); err != nil {
			log.Fatalf("Failed to start tracing: %s", err)
		}
		defer trace.Stop()
	}

	// Open the log file
	f, err := os.OpenFile(options.logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Fatalf("Failed to open the logfile %s: %s", options.logfile, err)
	}
	defer f.Close()

	// Initialize logger with default TextFormatter
	log.SetFormatter(&log.TextFormatter{DisableColors: true, FullTimestamp: true})

	// Set the log level
	if options.debug {
		log.SetLevel(log.DebugLevel)
	}

	// SetOutput to log file and/or stdout
	log.SetOutput(f)
	if options.stdout {
		log.SetOutput(io.MultiWriter(os.Stdout, f))
	}

	// Parse the -reference parameter
	if err = ParseReference(); err != nil {
		log.Fatalf(err.Error())
	}

	// Host is either the host's UUID (if run on vSphere) or the hostname of
	// the system (if run standalone)
	host, err := sys.UUID()
	if host != "" {
		log.Infof("Using UUID (%s) for imagestore name", host)
	} else if options.standalone {
		host, err = os.Hostname()
		log.Infof("Using host (%s) for imagestore name", host)
	}

	if err != nil {
		log.Fatalf("Failed to return the UUID or host name: %s", err)
	}

	if !options.standalone {
		log.Debugf("Running with portlayer")

		// Ping the server to ensure it's at least running
		ok, err := PingPortLayer()
		if err != nil || !ok {
			log.Fatalf("Failed to ping portlayer: %s", err)
		}
	} else {
		log.Debugf("Running standalone")
	}

	// Calculate (and overwrite) the registry URL and make sure that it responds to requests
	options.registry, err = LearnRegistryURL(options)
	if err != nil {
		log.Fatalf("Error while pulling image: %s", err)
	}

	// Get the URL of the OAuth endpoint
	url, err := LearnAuthURL(options)
	if err != nil {
		log.Fatalf("Failed to obtain OAuth endpoint: %s", err)
	}

	// Get the OAuth token, but only if we have a URL
	if url != nil {
		token, err := FetchToken(url)
		if err != nil {
			log.Fatalf("Failed to fetch OAuth token: %s", err)
		}
		options.token = token
	}

	// HACK: Required to learn the name of the vmdk from the given reference.
	// Used by the docker personality until metadata support lands.
	if !options.resolv {
		progress.Message(po, "", "Pulling from "+options.image)
	}

	// Get the manifest
	manifest, err := FetchImageManifest(options)
	if err != nil {
		if strings.Contains(err.Error(), "image not found") {
			log.Fatalf("Error: image %s not found", options.image)
		} else {
			log.Fatalf("Error while pulling image manifest: %s", err)
		}
	}

	// Create the ImageWithMeta slice to hold Image structs
	images, imageLayer, err := ImagesToDownload(manifest, host)
	if err != nil {
		log.Fatalf(err.Error())
	}

	// HACK: Required to learn the name of the vmdk from the given reference.
	// Used by the docker personality until metadata support lands.
	if options.resolv {
		if len(images) > 0 {
			fmt.Printf("%s", images[0].meta)
			os.Exit(0)
		}
		os.Exit(1)
	}

	// Fetch the blobs from registry
	if err := DownloadImageBlobs(images); err != nil {
		log.Fatalf(err.Error())
	}

	if err := CreateImageConfig(images, manifest); err != nil {
		log.Fatalf(err.Error())
	}

	// Write blobs to the storage layer
	if err := WriteImageBlobs(images); err != nil {
		log.Fatalf(err.Error())
	}

	if err := updateImageMetadata(imageLayer, manifest); err != nil {
		log.Fatalf(err.Error())
	}

	progress.Message(po, "", "Digest: "+manifest.Digest)
	if len(images) > 0 {
		progress.Message(po, "", "Status: Downloaded newer image for "+options.image+":"+options.tag)
	} else {
		progress.Message(po, "", "Status: Image is up to date for "+options.image+":"+options.tag)
	}
}
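One caveat in main above: log.Fatalf exits through os.Exit(1), so none of the deferred cleanups (the profile and trace Stop calls, the log file Close) run on a fatal error. A sketch of one common restructuring, using a hypothetical run helper; options is the same variable used above.

func main() {
	if err := run(); err != nil {
		log.Error(err)
		os.Exit(1)
	}
}

func run() error {
	f, err := os.OpenFile(options.logfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return fmt.Errorf("failed to open the logfile %s: %s", options.logfile, err)
	}
	// Unlike with log.Fatalf, this runs on every error path below.
	defer f.Close()

	// ... remaining setup and pull steps, returning errors instead of
	// calling log.Fatalf ...
	return nil
}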
Example #6
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If GetByTag succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:         blobSum,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return false, err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo)
	if err != nil {
		return false, err
	}

	if manifestDigest != "" {
		progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
	}

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil && oldTagImageID == imageID {
		return false, nil
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}
Example #7
// DownloadLayers ensures layers end up in the portlayer's image store.
// It de-duplicates downloads for layers that already exist in the store or are
// already in flight. This code utilizes Docker's xfer package: https://github.com/docker/docker/tree/v1.11.2/distribution/xfer
func (ldm *LayerDownloader) DownloadLayers(ctx context.Context, ic *ImageC) error {
	defer trace.End(trace.Begin(""))

	var (
		topDownload    *downloadTransfer
		watcher        *xfer.Watcher
		d              xfer.Transfer
		layerCount     = 0
		sf             = streamformatter.NewJSONStreamFormatter()
		progressOutput = &serialProgressOutput{
			c:   make(chan prog, 100),
			out: sf.NewProgressOutput(ic.Outstream, false),
		}
	)

	go progressOutput.run()
	defer progressOutput.stop()

	// lock here so that we get all layers in flight before another client comes along
	ldm.m.Lock()

	// Grab the imageLayers
	layers := ic.ImageLayers

	// iterate backwards through layers to download
	for i := len(layers) - 1; i >= 0; i-- {

		layer := layers[i]
		id := layer.ID

		layerConfig, err := LayerCache().Get(id)
		if err != nil {

			switch err := err.(type) {
			case LayerNotFoundError:

				layerCount++

				// layer does not already exist in store and is not currently in flight, so download it
				progress.Update(progressOutput, layer.String(), "Pulling fs layer")

				xferFunc := ldm.makeDownloadFunc(layer, ic, topDownload, layers)
				d, watcher = ldm.tm.Transfer(id, xferFunc, progressOutput)
				topDownload = d.(*downloadTransfer)

				defer topDownload.Transfer.Release(watcher)

				ldm.registerDownload(topDownload)
				layer.Downloading = true
				LayerCache().Add(layer)

				continue
			default:
				return err
			}
		}

		if layerConfig.Downloading {

			layerCount++

			if existingDownload, ok := ldm.downloadsByID[id]; ok {

				xferFunc := ldm.makeDownloadFuncFromDownload(layer, existingDownload, topDownload, layers)
				d, watcher = ldm.tm.Transfer(id, xferFunc, progressOutput)
				topDownload = d.(*downloadTransfer)

				defer topDownload.Transfer.Release(watcher)

			}
			continue
		}

		progress.Update(progressOutput, layer.String(), "Already exists")
	}

	ldm.m.Unlock()

	// each layer download will block until the parent download finishes,
	// so this will block until the child-most layer, and thus all layers, have finished downloading
	if layerCount > 0 {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-topDownload.Done():
		default:
			// Neither channel is ready yet: block until the topmost download,
			// and therefore the whole chain, finishes.
			<-topDownload.Done()
		}
		err := topDownload.result()
		if err != nil {
			return err
		}
	} else {
		if err := updateRepositoryCache(ic); err != nil {
			return err
		}
	}

	progress.Message(progressOutput, "", "Digest: "+ic.ImageManifest.Digest)

	if layerCount > 0 {
		progress.Message(progressOutput, "", "Status: Downloaded newer image for "+ic.Image+":"+ic.Tag)
	} else {
		progress.Message(progressOutput, "", "Status: Image is up to date for "+ic.Image+":"+ic.Tag)
	}

	return nil
}
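DownloadLayers funnels progress messages from concurrent transfers through serialProgressOutput, whose definition is not shown here. A sketch of the pattern implied by the fields and calls above (c, out, run, stop); the prog type and the method bodies are guesses.

type prog struct {
	p progress.Progress
}

// serialProgressOutput serializes WriteProgress calls from many goroutines
// onto a single underlying progress.Output.
type serialProgressOutput struct {
	c   chan prog
	out progress.Output
}

func (s *serialProgressOutput) WriteProgress(p progress.Progress) error {
	s.c <- prog{p: p}
	return nil
}

func (s *serialProgressOutput) run() {
	// Errors from the wrapped output are dropped in this sketch.
	for pr := range s.c {
		s.out.WriteProgress(pr.p)
	}
}

func (s *serialProgressOutput) stop() {
	close(s.c)
}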
Example #8
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true
				break
			}
		}
		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"
			}
			return false, fmt.Errorf("target is %s", configClass)
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")
		}
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)
		if err == nil {
			if oldTagID == id {
				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
			}
		} else if err != reference.ErrDoesNotExist {
			return false, err
		}

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
				return false, err
			}
		} else {
			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
				return false, err
			}
			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
				return false, err
			}
		}
	}
	return true, nil
}
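The schema2 gate above rejects manifests whose config blob has a media type outside the caller's allow-list, for example a plugin config when an image was requested. A standalone restatement of the check; the map contents are illustrative, with schema2.MediaTypeImageConfig being the ordinary image case.

// mediaTypeClasses maps config media types to the human-readable class used
// in the "target is %s" error.
var mediaTypeClasses = map[string]string{
	schema2.MediaTypeImageConfig: "image",
}

func checkAllowedMediaType(allowed []string, mediaType string) error {
	for _, t := range allowed {
		if mediaType == t {
			return nil
		}
	}
	class := mediaTypeClasses[mediaType]
	if class == "" {
		class = "unknown"
	}
	return fmt.Errorf("target is %s", class)
}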
Example #9
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	var (
		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only
	)
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
		if err != nil {
			return false, allowV1Fallback(err)
		}
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())
		if err != nil {
			return false, err
		}
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	if manifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		if m.Manifest.Config.MediaType == schema2.MediaTypePluginConfig ||
			m.Manifest.Config.MediaType == "application/vnd.docker.plugin.image.v0+json" { //TODO: remove this v0 before 1.13 GA
			return false, errMediaTypePlugin
		}
	}

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	logrus.Debugf("Pulling ref from V2 registry: %s", ref.String())
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name())

	var (
		id             digest.Digest
		manifestDigest digest.Digest
	)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
		if err != nil {
			return false, err
		}
	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
		if err != nil {
			return false, err
		}
	default:
		return false, errors.New("unsupported manifest format")
	}

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	oldTagID, err := p.config.ReferenceStore.Get(ref)
	if err == nil {
		if oldTagID == id {
			return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)
		}
	} else if err != reference.ErrDoesNotExist {
		return false, err
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {
			return false, err
		}
	} else {
		if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {
			return false, err
		}
		if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
			return false, err
		}
	}
	return true, nil
}
Example #10
// PullImage pulls an image from Docker Hub
func (ic *ImageC) PullImage() error {

	// Bound the entire pull by the configured timeout
	ctx, cancel := context.WithTimeout(context.Background(), ic.Options.Timeout)
	defer cancel()

	// Parse the -reference parameter
	if err := ic.ParseReference(); err != nil {
		log.Errorf(err.Error())
		return err
	}

	// Host is either the host's UUID (if run on vSphere) or the hostname of
	// the system (if run standalone)
	host, err := sys.UUID()
	if err != nil {
		log.Errorf("Failed to return host name: %s", err)
		return err
	}

	if host != "" {
		log.Infof("Using UUID (%s) for imagestore name", host)
	}

	ic.Storename = host

	// Ping the server to ensure it's at least running
	ok, err := PingPortLayer(ic.Host)
	if err != nil || !ok {
		log.Errorf("Failed to ping portlayer: %s", err)
		return err
	}

	// Calculate (and overwrite) the registry URL and make sure that it responds to requests
	ic.Registry, err = LearnRegistryURL(ic.Options)
	if err != nil {
		log.Errorf("Error while pulling image: %s", err)
		return err
	}

	// Get the URL of the OAuth endpoint
	url, err := LearnAuthURL(ic.Options)
	if err != nil {
		log.Infof(err.Error())
		switch err := err.(type) {
		case urlfetcher.ImageNotFoundError:
			return fmt.Errorf("Error: image %s not found", ic.Reference)
		default:
			return fmt.Errorf("Failed to obtain OAuth endpoint: %s", err)
		}
	}

	// Get the OAuth token, but only if we have a URL
	if url != nil {
		token, err := FetchToken(ctx, ic.Options, url, ic.progressOutput)
		if err != nil {
			log.Errorf("Failed to fetch OAuth token: %s", err)
			return err
		}
		ic.Token = token
	}

	progress.Message(ic.progressOutput, "", "Pulling from "+ic.Image)

	// Get the manifest
	manifest, err := FetchImageManifest(ctx, ic.Options, ic.progressOutput)
	if err != nil {
		log.Infof(err.Error())
		switch err := err.(type) {
		case urlfetcher.ImageNotFoundError:
			return fmt.Errorf("Error: image %s not found", ic.Image)
		case urlfetcher.TagNotFoundError:
			return fmt.Errorf("Tag %s not found in repository %s", ic.Tag, ic.Image)
		default:
			return fmt.Errorf("Error while pulling image manifest: %s", err)
		}
	}

	ic.ImageManifest = manifest
	layers, err := ic.LayersToDownload()
	if err != nil {
		return err
	}
	ic.ImageLayers = layers

	err = ldm.DownloadLayers(ctx, ic)
	if err != nil {
		return err
	}

	return nil
}
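Unlike Example #2, which matches on error text with strings.Contains, PullImage branches on concrete error types. A sketch of what a type like urlfetcher.ImageNotFoundError can look like; the field name is hypothetical.

// ImageNotFoundError reports a missing image in a form callers can detect
// with a type switch rather than by parsing the message.
type ImageNotFoundError struct {
	Ref string
}

func (e ImageNotFoundError) Error() string {
	return fmt.Sprintf("image %s not found", e.Ref)
}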