Example #1
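// pullSchema1 verifies a schema1 manifest, deduplicates its layers, and
// rebuilds the image config and history from the embedded v1-compatibility
// entries, returning the image and the manifest's canonical digest.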
func (mf *v2ManifestFetcher) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (img *image.Image, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return nil, "", err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return nil, "", err
	}

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return nil, "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return nil, "", err
		}
		history = append(history, h)
	}

	rootFS := image.NewRootFS()
	configRaw, err := makeRawConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
	if err != nil {
		return nil, "", err
	}

	config, err := json.Marshal(configRaw)
	if err != nil {
		return nil, "", err
	}

	img, err = image.NewFromJSON(config)
	if err != nil {
		return nil, "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return img, manifestDigest, nil
}
Example #2
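// pullImageJSON fetches a legacy v1 image config from the registry endpoint
// and converts it into a new-style image with a single history entry.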
func (mf *v1ManifestFetcher) pullImageJSON(imgID, endpoint string, token []string) (*image.Image, error) {
	imgJSON, _, err := mf.session.GetRemoteImageJSON(imgID, endpoint)
	if err != nil {
		return nil, err
	}
	h, err := v1.HistoryFromConfig(imgJSON, false)
	if err != nil {
		return nil, err
	}
	configRaw, err := makeRawConfigFromV1Config(imgJSON, image.NewRootFS(), []image.History{h})
	if err != nil {
		return nil, err
	}
	config, err := json.Marshal(configRaw)
	if err != nil {
		return nil, err
	}
	img, err := image.NewFromJSON(config)
	if err != nil {
		return nil, err
	}
	return img, nil
}
Example #3
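// migrateImage moves a graph-based image into the content-addressable image
// and layer stores, migrating its parent first and recording the old-ID to
// strong-ID mapping for later lookups.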
func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
	defer func() {
		if err != nil {
			logrus.Errorf("migration failed for %v, err: %v", id, err)
		}
	}()

	parent, err := getParent(filepath.Join(root, graphDirName, id))
	if err != nil {
		return err
	}

	var parentID image.ID
	if parent != "" {
		var exists bool
		if parentID, exists = mappings[parent]; !exists {
			if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil {
				// todo: fail or allow broken chains?
				return err
			}
			parentID = mappings[parent]
		}
	}

	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

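	// Read back the diff ID and size recorded for this layer in the graph
	// directory, so the layer can be registered by its graph ID.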
	diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName))
	if err != nil {
		return err
	}
	diffID, err := digest.ParseDigest(string(diffIDData))
	if err != nil {
		return err
	}

	sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName))
	if err != nil {
		return err
	}
	size, err := strconv.ParseInt(string(sizeStr), 10, 64)
	if err != nil {
		return err
	}

	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size)
	if err != nil {
		return err
	}
	logrus.Infof("migrated layer %s to %s", id, layer.DiffID())

	jsonFile := filepath.Join(root, graphDirName, id, "json")
	imageJSON, err := ioutil.ReadFile(jsonFile)
	if err != nil {
		return err
	}

	h, err := imagev1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	rootFS.Append(layer.DiffID())

	config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	strongID, err := is.Create(config)
	if err != nil {
		return err
	}
	logrus.Infof("migrated image %s to %s", id, strongID)

	if parentID != "" {
		if err := is.SetParent(strongID, parentID); err != nil {
			return err
		}
	}

	checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum"))
	if err == nil { // best effort
		dgst, err := digest.ParseDigest(string(checksum))
		if err == nil {
			V2MetadataService := metadata.NewV2MetadataService(ms)
			V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
		}
	}
	_, err = ls.Release(layer)
	if err != nil {
		return err
	}

	mappings[id] = strongID
	return
}
Example #4
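// pullSchema1 verifies a schema1 manifest, downloads its layers from
// bottom-most to top-most, and creates the resulting image in the image
// store, returning its ID and the manifest's canonical digest.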
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
	if err != nil {
		return "", "", err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return "", "", err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return "", "", err
	}

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return "", "", err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return "", "", err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:         blobSum,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return "", "", err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return "", "", err
	}

	imageID, err = p.config.ImageStore.Create(config)
	if err != nil {
		return "", "", err
	}

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
}
Example #5
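// pullImage walks a legacy v1 image's history from bottom-most to top-most,
// downloads each layer's config and data, creates the assembled image, and
// tags it under localNameRef.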
func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
	if err != nil {
		return err
	}
	if len(history) < 1 {
		return fmt.Errorf("empty history for image %s", v1ID)
	}
	progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers")

	var (
		descriptors []xfer.DownloadDescriptor
		newHistory  []image.History
		imgJSON     []byte
		imgSize     int64
	)

	// Iterate over layers, in order from bottom-most to top-most. Download
	// config for all layers and create descriptors.
	for i := len(history) - 1; i >= 0; i-- {
		v1LayerID := history[i]
		imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint)
		if err != nil {
			return err
		}

		// Create a new-style config from the legacy configs
		h, err := v1.HistoryFromConfig(imgJSON, false)
		if err != nil {
			return err
		}
		newHistory = append(newHistory, h)

		layerDescriptor := &v1LayerDescriptor{
			v1LayerID:        v1LayerID,
			indexName:        p.repoInfo.Index.Name,
			endpoint:         endpoint,
			v1IDService:      p.v1IDService,
			layersDownloaded: layersDownloaded,
			layerSize:        imgSize,
			session:          p.session,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	rootFS := image.NewRootFS()
	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory)
	if err != nil {
		return err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return err
	}

	if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil {
		return err
	}

	return nil
}
Example #6
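// legacyLoadImage imports one pre-1.10 image from an extracted tar export,
// recursively loading its parent chain before registering the layer and
// creating the image config.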
func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error {
	if _, loaded := loadedMap[oldID]; loaded {
		return nil
	}
	configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName))
	if err != nil {
		return err
	}
	imageJSON, err := ioutil.ReadFile(configPath)
	if err != nil {
		logrus.Debugf("Error reading json: %v", err)
		return err
	}

	var img struct{ Parent string }
	if err := json.Unmarshal(imageJSON, &img); err != nil {
		return err
	}

	var parentID image.ID
	if img.Parent != "" {
		for {
			var loaded bool
			if parentID, loaded = loadedMap[img.Parent]; !loaded {
				if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil {
					return err
				}
			} else {
				break
			}
		}
	}

	// todo: try to connect with migrate code
	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := l.is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName))
	if err != nil {
		return err
	}
	newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, progressOutput)
	if err != nil {
		return err
	}
	rootFS.Append(newLayer.DiffID())

	h, err := v1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	imgID, err := l.is.Create(config)
	if err != nil {
		return err
	}

	metadata, err := l.ls.Release(newLayer)
	layer.LogReleaseMetadata(metadata)
	if err != nil {
		return err
	}

	if parentID != "" {
		if err := l.is.SetParent(imgID, parentID); err != nil {
			return err
		}
	}

	loadedMap[oldID] = imgID
	return nil
}
Example #7
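// migrateImage moves a graph-based image into the content-addressable
// stores, resolving the parent from the image JSON (or a legacy "parent"
// file) and migrating it first.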
func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
	defer func() {
		if err != nil {
			logrus.Errorf("migration failed for %v, err: %v", id, err)
		}
	}()

	jsonFile := filepath.Join(root, graphDirName, id, "json")
	imageJSON, err := ioutil.ReadFile(jsonFile)
	if err != nil {
		return err
	}
	var parent struct {
		Parent   string
		ParentID digest.Digest `json:"parent_id"`
	}
	if err := json.Unmarshal(imageJSON, &parent); err != nil {
		return err
	}
	if parent.Parent == "" && parent.ParentID != "" { // v1.9
		parent.Parent = parent.ParentID.Hex()
	}
	// A legacy "parent" file, if present and non-empty, stores the parent's
	// compatibilityID and takes precedence over the value from the JSON.
	parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "parent"))
	if err == nil && len(parentCompatibilityID) > 0 {
		parent.Parent = string(parentCompatibilityID)
	}

	var parentID image.ID
	if parent.Parent != "" {
		var exists bool
		if parentID, exists = mappings[parent.Parent]; !exists {
			if err := migrateImage(parent.Parent, root, ls, is, ms, mappings); err != nil {
				// todo: fail or allow broken chains?
				return err
			}
			parentID = mappings[parent.Parent]
		}
	}

	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), filepath.Join(root, graphDirName, id, tarDataFileName))
	if err != nil {
		return err
	}
	logrus.Infof("migrated layer %s to %s", id, layer.DiffID())

	h, err := imagev1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	rootFS.Append(layer.DiffID())

	config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	strongID, err := is.Create(config)
	if err != nil {
		return err
	}
	logrus.Infof("migrated image %s to %s", id, strongID)

	if parentID != "" {
		if err := is.SetParent(strongID, parentID); err != nil {
			return err
		}
	}

	checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum"))
	if err == nil { // best effort
		dgst, err := digest.ParseDigest(string(checksum))
		if err == nil {
			blobSumService := metadata.NewBlobSumService(ms)
			blobSumService.Add(layer.DiffID(), dgst)
		}
	}
	_, err = ls.Release(layer)
	if err != nil {
		return err
	}

	mappings[id] = strongID
	return
}
Example #8
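// pullImage downloads a legacy v1 image, reusing layers that already exist
// on disk where possible, and tags the assembled image under localNameRef.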
func (p *v1Puller) pullImage(out io.Writer, v1ID, endpoint string, localNameRef reference.Named) (layersDownloaded bool, err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(v1ID, endpoint)
	if err != nil {
		return false, err
	}
	if len(history) < 1 {
		return false, fmt.Errorf("empty history for image %s", v1ID)
	}
	out.Write(p.sf.FormatProgress(stringid.TruncateID(v1ID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	var (
		referencedLayers []layer.Layer
		parentID         layer.ChainID
		newHistory       []image.History
		img              *image.V1Image
		imgJSON          []byte
		imgSize          int64
	)

	defer func() {
		for _, l := range referencedLayers {
			layer.ReleaseAndLog(p.config.LayerStore, l)
		}
	}()

	layersDownloaded = false

	// Iterate over layers from top-most to bottom-most, checking if any
	// already exist on disk.
	var i int
	for i = 0; i != len(history); i++ {
		v1LayerID := history[i]
		// Do we have a mapping for this particular v1 ID on this
		// registry?
		if layerID, err := p.v1IDService.Get(v1LayerID, p.repoInfo.Index.Name); err == nil {
			// Does the layer actually exist?
			if l, err := p.config.LayerStore.Get(layerID); err == nil {
				for j := i; j >= 0; j-- {
					logrus.Debugf("Layer already exists: %s", history[j])
					out.Write(p.sf.FormatProgress(stringid.TruncateID(history[j]), "Already exists", nil))
				}
				referencedLayers = append(referencedLayers, l)
				parentID = layerID
				break
			}
		}
	}

	needsDownload := i

	// Iterate over layers, in order from bottom-most to top-most. Download
	// config for all layers, and download actual layer data if needed.
	for i = len(history) - 1; i >= 0; i-- {
		v1LayerID := history[i]
		imgJSON, imgSize, err = p.downloadLayerConfig(out, v1LayerID, endpoint)
		if err != nil {
			return layersDownloaded, err
		}

		img = &image.V1Image{}
		if err := json.Unmarshal(imgJSON, img); err != nil {
			return layersDownloaded, err
		}

		if i < needsDownload {
			l, err := p.downloadLayer(out, v1LayerID, endpoint, parentID, imgSize, &layersDownloaded)

			// Note: This needs to be done even in the error case to avoid
			// stale references to the layer.
			if l != nil {
				referencedLayers = append(referencedLayers, l)
			}
			if err != nil {
				return layersDownloaded, err
			}

			parentID = l.ChainID()
		}

		// Create a new-style config from the legacy configs
		h, err := v1.HistoryFromConfig(imgJSON, false)
		if err != nil {
			return layersDownloaded, err
		}
		newHistory = append(newHistory, h)
	}

	rootFS := image.NewRootFS()
	l := referencedLayers[len(referencedLayers)-1]
	for l != nil {
		rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...)
		l = l.Parent()
	}

	config, err := v1.MakeConfigFromV1Config(imgJSON, rootFS, newHistory)
	if err != nil {
		return layersDownloaded, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return layersDownloaded, err
	}

	if err := p.config.TagStore.Add(localNameRef, imageID, true); err != nil {
		return layersDownloaded, err
	}

	return layersDownloaded, nil
}
Example #9
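// pullV2Tag resolves ref's tag or digest against a v2 registry, verifies
// the schema1 manifest, downloads the layers through the download manager,
// and updates the reference store, reporting whether the tag changed.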
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}

	// If GetByTag succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.
	p.confirmedV2 = true

	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:         blobSum,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return false, err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo)
	if err != nil {
		return false, err
	}

	if manifestDigest != "" {
		progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
	}

	oldTagImageID, err := p.config.ReferenceStore.Get(ref)
	if err == nil && oldTagImageID == imageID {
		return false, nil
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}
Example #10
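// pullV2Tag resolves ref's tag or digest against a v2 registry, downloads
// and registers each layer itself through a shared download pool, creates
// the image, and reports whether the tag was updated.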
func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Digested); isDigested {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	defer func() {
		for _, d := range downloads {
			p.config.Pool.removeWithError(d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	// Image history converted to the new format
	var history []image.History

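	// The pool key accumulates blobsums as layers are processed, so that
	// concurrent pulls of an identical layer stack share one download.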
	poolKey := "v2layer:"
	notFoundLocally := false

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		poolKey += blobSum.String()

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		// Do we have a layer on disk corresponding to the set of
		// blobsums up to this point?
		if !notFoundLocally {
			notFoundLocally = true
			diffID, err := p.blobSumService.GetDiffID(blobSum)
			if err == nil {
				rootFS.Append(diffID)
				if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil {
					notFoundLocally = false
					logrus.Debugf("Layer already exists: %s", blobSum.String())
					out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil))
					defer layer.ReleaseAndLog(p.config.LayerStore, l)
					continue
				} else {
					rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1]
				}
			}
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil))

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}

		d := &downloadInfo{
			poolKey: poolKey,
			digest:  blobSum,
			tmpFile: tmpFile,
			// TODO: this channel buffer seems to have fixed a hang seen on
			// go1.5, which may point to a deeper problem: the error may
			// never be taken from the channel in the loop below.
			err: make(chan error, 1),
		}

		downloads = append(downloads, d)

		broadcaster, found := p.config.Pool.add(d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}

			diffID, err := p.blobSumService.GetDiffID(d.digest)
			if err != nil {
				return false, err
			}
			rootFS.Append(diffID)

			l, err := p.config.LayerStore.Get(rootFS.ChainID())
			if err != nil {
				return false, err
			}

			defer layer.ReleaseAndLog(p.config.LayerStore, l)

			continue
		}

		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.digest.String()),
			Action:    "Extracting",
		})

		inflatedLayerData, err := archive.DecompressStream(reader)
		if err != nil {
			return false, fmt.Errorf("could not get decompression stream: %v", err)
		}

		l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID())
		if err != nil {
			return false, fmt.Errorf("failed to register layer: %v", err)
		}
		logrus.Debugf("layer %s registered successfully", l.DiffID())
		rootFS.Append(l.DiffID())

		// Cache mapping from this layer's DiffID to the blobsum
		if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil {
			return false, err
		}

		defer layer.ReleaseAndLog(p.config.LayerStore, l)

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	var oldTagImageID image.ID
	if !tagUpdated {
		oldTagImageID, err = p.config.TagStore.Get(ref)
		if err != nil || oldTagImageID != imageID {
			tagUpdated = true
		}
	}

	if tagUpdated {
		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.TagStore.AddDigest(canonical, imageID, true); err != nil {
				return false, err
			}
		} else if err = p.config.TagStore.AddTag(ref, imageID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}