Example #1
File: load.go Project: Gandi/docker
func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
	if err := eng.Job("image_get", address).Run(); err != nil {
		log.Debugf("Loading %s", address)

		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
		if err != nil {
			log.Debugf("Error reading json", err)
			return err
		}

		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
		if err != nil {
			log.Debugf("Error reading embedded tar", err)
			return err
		}
		img, err := image.NewImgJSON(imageJson)
		if err != nil {
			log.Debugf("Error unmarshalling json", err)
			return err
		}
		if img.Parent != "" {
			if !s.graph.Exists(img.Parent) {
				if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
					return err
				}
			}
		}
		if err := s.graph.Register(img, imageJson, layer); err != nil {
			return err
		}
	}
	log.Debugf("Completed processing %s", address)

	return nil
}
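Every example on this page funnels raw v1 image JSON through image.NewImgJSON, which unmarshals the bytes into an *image.Image. A minimal, self-contained sketch of the call (the JSON literal is a hypothetical two-field config, not taken from any project above):

package main

import (
	"fmt"

	"github.com/docker/docker/image"
)

func main() {
	// Hypothetical, minimal v1 config; real configs carry many more fields.
	imgJSON := []byte(`{"id":"4fa6e0f0c678","parent":"ea13149945cb"}`)

	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	// Fields such as ID and Parent are then available to callers,
	// exactly as the examples on this page use them.
	fmt.Println(img.ID, img.Parent)
}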
Example #2
func newContentAddressableImage(v1Compatibility []byte, blobSum digest.Digest, parent digest.Digest) (contentAddressableDescriptor, error) {
	img := contentAddressableDescriptor{
		v1Compatibility: v1Compatibility,
	}

	var err error
	img.config, err = image.MakeImageConfig(v1Compatibility, blobSum, parent)
	if err != nil {
		return img, err
	}
	img.strongID, err = image.StrongID(img.config)
	if err != nil {
		return img, err
	}

	unmarshalledConfig, err := image.NewImgJSON(v1Compatibility)
	if err != nil {
		return img, err
	}

	img.compatibilityID = unmarshalledConfig.ID
	img.id = img.strongID.Hex()

	return img, nil
}
Example #3
// storeImage stores file system layer data for the given image to the
// graph's storage driver. Image metadata is stored in a file
// at the specified root directory.
func (graph *Graph) storeImage(id, parent string, config []byte, layerData io.Reader, root string) (err error) {
	var size int64
	// Store the layer. If layerData is not nil, unpack it into the new layer
	if layerData != nil {
		if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil {
			return err
		}
	}

	if err := graph.saveSize(root, size); err != nil {
		return err
	}

	if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil {
		return err
	}

	// If image is pointing to a parent via CompatibilityID write the reference to disk
	img, err := image.NewImgJSON(config)
	if err != nil {
		return err
	}

	if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() {
		if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil {
			return err
		}
	}
	return nil
}
Example #4
// storeImage stores file system layer data for the given image to the
// graph's storage driver. Image metadata is stored in a file
// at the specified root directory.
func (graph *Graph) storeImage(id, parent string, config []byte, layerData archive.ArchiveReader, root string) (err error) {
	var size int64
	if wd, ok := graph.driver.(*windows.WindowsGraphDriver); ok {
		// Store the layer. If layerData is not nil and this isn't a base image,
		// unpack it into the new layer
		if layerData != nil && parent != "" {
			var ids []string
			parentImg, err := graph.Get(parent)
			if err != nil {
				return err
			}

			ids, err = graph.ParentLayerIds(parentImg)
			if err != nil {
				return err
			}

			if size, err = wd.Import(id, layerData, wd.LayerIdsToPaths(ids)); err != nil {
				return err
			}
		}

	} else {
		// We keep this functionality here so that we can still work with the
		// VFS driver during development. This will not be used for actual running
		// of Windows containers. Without this code, it would not be possible to
		// docker pull using the VFS driver.

		// Store the layer. If layerData is not nil, unpack it into the new layer
		if layerData != nil {
			if size, err = graph.disassembleAndApplyTarLayer(id, parent, layerData, root); err != nil {
				return err
			}
		}

		if err := graph.saveSize(root, size); err != nil {
			return err
		}

		if err := ioutil.WriteFile(jsonPath(root), config, 0600); err != nil {
			return err
		}

		// If image is pointing to a parent via CompatibilityID write the reference to disk
		img, err := image.NewImgJSON(config)
		if err != nil {
			return err
		}

		if img.ParentID.Validate() == nil && parent != img.ParentID.Hex() {
			if err := ioutil.WriteFile(filepath.Join(root, parentFileName), []byte(parent), 0600); err != nil {
				return err
			}
		}

	}

	return nil
}
Example #5
func (graph *FakeGraph) SetExists(imageID string, imgJSON []byte) {
	graph.mutex.Lock()
	defer graph.mutex.Unlock()
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		panic("bad imgJSON for imageID: " + imageID)
	}
	graph.exists[imageID] = img
}
Example #6
// getV1ID returns the compatibilityID for the ID in the graph. The
// compatibilityID is read from the v1Compatibility config file on disk.
func (p *v1Pusher) getV1ID(id string) (string, error) {
	jsonData, err := p.getV1Config(id)
	if err != nil {
		return "", err
	}
	img, err := image.NewImgJSON(jsonData)
	if err != nil {
		return "", err
	}
	return img.ID, nil
}
Example #7
File: load.go Project: jbbarth/docker
func (s *TagStore) recursiveLoad(eng *engine.Engine, address, tmpImageDir string) error {
	if err := eng.Job("image_get", address).Run(); err != nil {
		logrus.Debugf("Loading %s", address)

		imageJson, err := ioutil.ReadFile(path.Join(tmpImageDir, "repo", address, "json"))
		if err != nil {
			logrus.Debugf("Error reading json", err)
			return err
		}

		layer, err := os.Open(path.Join(tmpImageDir, "repo", address, "layer.tar"))
		if err != nil {
			logrus.Debugf("Error reading embedded tar", err)
			return err
		}
		img, err := image.NewImgJSON(imageJson)
		if err != nil {
			logrus.Debugf("Error unmarshalling json", err)
			return err
		}
		if err := utils.ValidateID(img.ID); err != nil {
			logrus.Debugf("Error validating ID: %s", err)
			return err
		}

		// ensure no two downloads of the same layer happen at the same time
		if c, err := s.poolAdd("pull", "layer:"+img.ID); err != nil {
			if c != nil {
				logrus.Debugf("Image (id: %s) load is already running, waiting: %v", img.ID, err)
				<-c
				return nil
			}

			return err
		}

		defer s.poolRemove("pull", "layer:"+img.ID)

		if img.Parent != "" {
			if !s.graph.Exists(img.Parent) {
				if err := s.recursiveLoad(eng, img.Parent, tmpImageDir); err != nil {
					return err
				}
			}
		}
		if err := s.graph.Register(img, layer); err != nil {
			return err
		}
	}
	logrus.Debugf("Completed processing %s", address)

	return nil
}
Example #8
File: load.go Project: maaquib/docker
func (s *TagStore) recursiveLoad(address, tmpImageDir string) error {
	if _, err := s.LookupImage(address); err != nil {
		logrus.Debugf("Loading %s", address)

		imageJSON, err := ioutil.ReadFile(filepath.Join(tmpImageDir, "repo", address, "json"))
		if err != nil {
			logrus.Debugf("Error reading json: %v", err)
			return err
		}

		layer, err := os.Open(filepath.Join(tmpImageDir, "repo", address, "layer.tar"))
		if err != nil {
			logrus.Debugf("Error reading embedded tar: %v", err)
			return err
		}
		img, err := image.NewImgJSON(imageJSON)
		if err != nil {
			logrus.Debugf("Error unmarshalling json: %v", err)
			return err
		}
		if err := image.ValidateID(img.ID); err != nil {
			logrus.Debugf("Error validating ID: %v", err)
			return err
		}

		// ensure no two downloads of the same layer happen at the same time
		poolKey := "layer:" + img.ID
		broadcaster, found := s.poolAdd("pull", poolKey)
		if found {
			logrus.Debugf("Image (id: %s) load is already running, waiting", img.ID)
			return broadcaster.Wait()
		}

		defer s.poolRemove("pull", poolKey)

		if img.Parent != "" {
			if !s.graph.Exists(img.Parent) {
				if err := s.recursiveLoad(img.Parent, tmpImageDir); err != nil {
					return err
				}
			}
		}
		if err := s.graph.Register(v1Descriptor{img}, layer); err != nil {
			return err
		}
		logrus.Debugf("Completed processing %s", address)
		return nil
	}
	logrus.Debugf("already loaded %s", address)

	return nil
}
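The "ensure no two downloads of the same layer happen at the same time" guard above (and in most of the pull examples below) is a single-flight pattern keyed by layer ID. A stripped-down sketch of the same idea using only stdlib sync; this pool type and its methods are illustrative, not Docker's actual poolAdd/poolRemove:

type pool struct {
	mu   sync.Mutex
	jobs map[string]chan struct{}
}

// add returns (ch, false) when the caller owns the key and should do the
// work, or (ch, true) when another goroutine already holds it and the
// caller should wait for ch to be closed.
func (p *pool) add(key string) (chan struct{}, bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, found := p.jobs[key]; found {
		return c, true
	}
	c := make(chan struct{})
	p.jobs[key] = c
	return c, false
}

// remove wakes all waiters and frees the key.
func (p *pool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, found := p.jobs[key]; found {
		close(c)
		delete(p.jobs, key)
	}
}

A caller then mirrors the example above: if c, found := p.add("layer:" + img.ID); found { <-c; return nil }, with defer p.remove("layer:" + img.ID) on the owning path.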
Example #9
func (fetcher *RemoteV1Fetcher) fetchLayer(request *FetchRequest, endpointURL string, layerID string, remaining int64, logger lager.Logger) (*dockerLayer, error) {
	fetcher.GraphLock.Acquire(layerID)
	defer fetcher.GraphLock.Release(layerID)

	if img, err := fetcher.Cake.Get(layercake.DockerImageID(layerID)); err == nil {
		request.Logger.Info("using-cached", lager.Data{
			"layer": layerID,
			"size":  img.Size,
		})

		return &dockerLayer{imgEnv(img, request.Logger), imgVolumes(img), img.Size}, nil
	}

	imgJSON, imgSize, err := request.Session.GetRemoteImageJSON(layerID, endpointURL)
	if err != nil {
		return nil, fmt.Errorf("get remote image JSON: %v", err)
	}

	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return nil, fmt.Errorf("new image JSON: %v", err)
	}

	layer, err := request.Session.GetRemoteImageLayer(img.ID, endpointURL, int64(imgSize))
	if err != nil {
		return nil, fmt.Errorf("get remote image layer: %v", err)
	}

	defer layer.Close()

	started := time.Now()

	request.Logger.Info("downloading", lager.Data{
		"layer": layerID,
	})

	err = fetcher.Cake.Register(img, &QuotaedReader{R: layer, N: remaining})
	if err != nil {
		return nil, fmt.Errorf("register: %s", err)
	}

	request.Logger.Info("downloaded", lager.Data{
		"layer": layerID,
		"took":  time.Since(started),
		"vols":  imgVolumes(img),
		"size":  img.Size,
	})

	return &dockerLayer{imgEnv(img, request.Logger), imgVolumes(img), img.Size}, nil
}
Example #10
File: pull_v2.go Project: m1911/hyper
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	images := make([]*image.Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img, err := image.NewImgJSON([]byte(m.History[i].V1Compatibility))
		if err != nil {
			return err
		}
		images[i] = img
		if err := image.ValidateID(img.ID); err != nil {
			return err
		}
	}

	if images[len(images)-1].Parent != "" && !allowBaseParentImage {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("Invalid parent ID in the base layer of the image.")
	}

	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})

	var lastID string
	for _, img := range images {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(images) - 2; i >= 0; i-- {
		if images[i].ID == images[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if images[i].Parent != images[i+1].ID {
			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", images[i+1].ID, images[i].Parent)
		}
	}

	return nil
}
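The backwards loop is the subtle step: deleting element i shifts every index above i, so walking from the tail keeps the not-yet-visited indexes stable. The same mechanics on a plain slice (dedupAdjacent is a hypothetical helper with made-up data):

func dedupAdjacent(ids []string) []string {
	// Walk backwards so removing ids[i] never disturbs the indexes < i
	// that remain to be visited.
	for i := len(ids) - 2; i >= 0; i-- {
		if ids[i] == ids[i+1] {
			ids = append(ids[:i], ids[i+1:]...)
		}
	}
	return ids
}

dedupAdjacent([]string{"a", "b", "b", "b", "c"}) yields [a b c], the same way the repeated FSLayers/History entries collapse above.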
Example #11
func (fetcher *RemoteV2Fetcher) fetchImagesMetadata(request *FetchRequest) ([]*image.Image, *registry.RequestAuthorization, []digest.Digest, error) {
	request.Logger.Debug("docker-v2-fetch", lager.Data{
		"request": request,
	})

	auth, err := request.Session.GetV2Authorization(request.Endpoint, request.RemotePath, true)
	if err != nil {
		return nil, nil, nil, FetchError("GetV2Authorization", request.Endpoint.URL.Host, request.Path, err)
	}

	_, manifestBytes, err := request.Session.GetV2ImageManifest(request.Endpoint, request.RemotePath, request.Tag, auth)
	if err != nil {
		return nil, nil, nil, FetchError("GetV2ImageManifest", request.Endpoint.URL.Host, request.Path, err)
	}

	var manifest registry.ManifestData
	if err := json.Unmarshal(manifestBytes, &manifest); err != nil {
		return nil, nil, nil, FetchError("UnmarshalManifest", request.Endpoint.URL.Host, request.Path, err)
	}

	var hashes []digest.Digest
	var images []*image.Image

	for index, layer := range manifest.FSLayers {
		hash, err := digest.ParseDigest(layer.BlobSum)
		if err != nil {
			return nil, nil, nil, FetchError("ParseDigest", request.Endpoint.URL.Host, request.Path, err)
		}

		img, err := image.NewImgJSON([]byte(manifest.History[index].V1Compatibility))
		if err != nil {
			return nil, nil, nil, FetchError("NewImgJSON", request.Endpoint.URL.Host, request.Path, err)
		}

		images = append(images, img)
		hashes = append(hashes, hash)
	}

	return images, auth, hashes, nil
}
Example #12
// CmdSet stores a new image in the graph.
// Images are stored in the graph using 4 elements:
//	- A user-defined ID
//	- A collection of metadata describing the image
//	- A directory tree stored as a tar archive (also called the "layer")
//	- A reference to a "parent" ID on top of which the layer should be applied
//
// NOTE: even though the parent ID is only useful in relation to the layer and how
// to apply it (i.e. you could represent the full directory tree as 'parent_layer + layer'),
// it is treated as a top-level property of the image. This is an artifact of early
// design and should probably be cleaned up in the future to simplify the design.
//
// Syntax: image_set ID
// Input:
//	- Layer content must be streamed in tar format on stdin. An empty input is
//	valid and represents a nil layer.
//
//	- Image metadata must be passed in the command environment.
//		'json': a json-encoded object with all image metadata.
//			It will be stored as-is, without any encoding/decoding artifacts.
//			That is a requirement of the current registry client implementation,
//			because a re-encoded json might invalidate the image checksum at
//			the next upload, even with functionally identical content.
func (s *TagStore) CmdSet(job *engine.Job) error {
	if len(job.Args) != 1 {
		return fmt.Errorf("usage: %s NAME", job.Name)
	}
	var (
		imgJSON = []byte(job.Getenv("json"))
		layer   = job.Stdin
	)
	if len(imgJSON) == 0 {
		return fmt.Errorf("mandatory key 'json' is not set")
	}
	// We have to pass an *image.Image object, even though it will be completely
	// ignored in favor of the redundant json data.
	// FIXME: the current prototype of Graph.Register is redundant.
	img, err := image.NewImgJSON(imgJSON)
	if err != nil {
		return err
	}
	if err := s.graph.Register(img, layer); err != nil {
		return err
	}
	return nil
}
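For context, a caller-side sketch of invoking this job; setImage is a hypothetical helper, and it assumes the engine.Job Setenv/Stdin.Add counterparts implied by the Getenv and job.Stdin reads above:

func setImage(eng *engine.Engine, id string, imgJSON []byte, layer io.Reader) error {
	job := eng.Job("image_set", id)
	job.Setenv("json", string(imgJSON)) // stored as-is to keep the checksum stable
	job.Stdin.Add(layer)                // an empty stream is a valid nil layer
	return job.Run()
}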
Example #13
File: push.go Project: pombredanne/docker
func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
	if repoInfo.Official {
		j := eng.Job("trust_update_base")
		if err := j.Run(); err != nil {
			log.Errorf("error updating trust base graph: %s", err)
		}
	}

	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
	if err != nil {
		if repoInfo.Index.Official {
			log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
			return ErrV2RegistryUnavailable
		}
		return fmt.Errorf("error getting registry endpoint: %s", err)
	}

	tags, err := s.getImageTags(repoInfo.LocalName, tag)
	if err != nil {
		return err
	}
	if len(tags) == 0 {
		return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
	}

	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
	if err != nil {
		return fmt.Errorf("error getting authorization: %s", err)
	}

	for _, tag := range tags {
		log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
		mBytes, err := s.newManifest(repoInfo.LocalName, repoInfo.RemoteName, tag)
		if err != nil {
			return err
		}
		js, err := libtrust.NewJSONSignature(mBytes)
		if err != nil {
			return err
		}

		if err = js.Sign(s.trustKey); err != nil {
			return err
		}

		signedBody, err := js.PrettySignature("signatures")
		if err != nil {
			return err
		}
		log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

		manifestBytes := string(signedBody)

		manifest, verified, err := s.loadManifest(eng, signedBody)
		if err != nil {
			return fmt.Errorf("error verifying manifest: %s", err)
		}

		if err := checkValidManifest(manifest); err != nil {
			return fmt.Errorf("invalid manifest: %s", err)
		}

		if verified {
			log.Infof("Pushing verified image, key %s is registered for %q", s.trustKey.KeyID(), repoInfo.RemoteName)
		}

		for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
			var (
				sumStr  = manifest.FSLayers[i].BlobSum
				imgJSON = []byte(manifest.History[i].V1Compatibility)
			)

			sumParts := strings.SplitN(sumStr, ":", 2)
			if len(sumParts) < 2 {
				return fmt.Errorf("Invalid checksum: %s", sumStr)
			}
			manifestSum := sumParts[1]

			img, err := image.NewImgJSON(imgJSON)
			if err != nil {
				return fmt.Errorf("Failed to parse json: %s", err)
			}

			// Call mount blob
			exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
			if err != nil {
				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
				return err
			}

			if !exists {
				if err := s.pushV2Image(r, img, endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, sf, out, auth); err != nil {
					return err
				}
			} else {
				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image already exists", nil))
			}
		}

		// push the manifest
		if err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth); err != nil {
			return err
		}
	}
	return nil
}
Example #14
func (p *v1Puller) pullImage(out io.Writer, imgID, endpoint string) (layersDownloaded bool, err error) {
	var history []string
	history, err = p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)

	layersDownloaded = false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		poolKey := "layer:" + id
		broadcaster, found := p.poolAdd("pull", poolKey)
		broadcaster.Add(out)
		if found {
			logrus.Debugf("Image (id: %s) pull is already running, skipping", id)
			err = broadcaster.Wait()
			if err != nil {
				return layersDownloaded, err
			}
			continue
		}

		// This must use a closure so it captures the value of err when
		// the function returns, not when the 'defer' is evaluated.
		defer func() {
			p.poolRemoveWithError("pull", poolKey, err)
		}()

		if !p.graph.Exists(id) {
			broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int64
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, imgSize)
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				defer layer.Close()

				err = p.graph.Register(img,
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       broadcaster,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
		broadcaster.Close()
	}
	return layersDownloaded, nil
}
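The two retry loops above share one shape: up to five attempts with a linearly growing sleep (j * 500ms) between failures, returning the last error if every attempt fails. Factored out as a generic helper (a sketch, not part of the Docker codebase):

func retry(attempts int, step time.Duration, fn func() error) error {
	var err error
	for j := 1; j <= attempts; j++ {
		if err = fn(); err == nil {
			return nil
		}
		if j < attempts {
			// Linear backoff: sleep j*step before the next attempt.
			time.Sleep(time.Duration(j) * step)
		}
	}
	return err
}

The metadata fetch above would then read roughly retry(5, 500*time.Millisecond, func() error { ... GetRemoteImageJSON ... }), with the caveat that the original also special-cases network timeout errors, which a generic helper would need a predicate for.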
Example #15
File: pull_v2.go Project: ndeloof/docker
func (p *v2Puller) pullV2Tag(out io.Writer, tag, taggedName string) (verified bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err = p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)

		for _, d := range downloads {
			p.poolRemoveWithError("pull", d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
			continue
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:     img,
			poolKey: "layer:" + img.ID,
			digest:  manifest.FSLayers[i].BlobSum,
			// TODO: seems like this chan buffer solved hanging problem in go1.5,
			// this can indicate some deeper problem that somehow we never take
			// error from channel in loop below
			err: make(chan error, 1),
		}

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}
		d.tmpFile = tmpFile

		downloads = append(downloads, d)

		broadcaster, found := p.poolAdd("pull", d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	var tagUpdated bool
	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}
			continue
		}

		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.img.ID),
			Action:    "Extracting",
		})

		err = p.graph.Register(d.img, reader)
		if err != nil {
			return false, err
		}

		if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
			return false, err
		}

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
Example #16
func (p *v1Puller) pullImage(imgID, endpoint string, token []string) (bool, error) {
	history, err := p.session.GetRemoteHistory(imgID, endpoint)
	if err != nil {
		return false, err
	}
	out := p.config.OutStream
	out.Write(p.sf.FormatProgress(stringid.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	sessionID := p.session.ID()
	// As imgID has been retained in pullRepository, no need to retain again
	p.graph.Retain(sessionID, history[1:]...)
	defer p.graph.Release(sessionID, history[1:]...)

	layersDownloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := p.poolAdd("pull", "layer:"+id); err != nil {
			logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer p.poolRemove("pull", "layer:"+id)

		if !p.graph.Exists(id) {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = p.session.GetRemoteImageJSON(id, endpoint)
				if err != nil && j == retries {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layersDownloaded = true
				if err != nil && j == retries {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(p.sf.FormatProgress(stringid.TruncateID(id), status, nil))
				layer, err := p.session.GetRemoteImageLayer(img.ID, endpoint, int64(imgSize))
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error pulling dependent layers", nil))
					return layersDownloaded, err
				}
				layersDownloaded = true
				defer layer.Close()

				err = p.graph.Register(v1ImageDescriptor{img},
					progressreader.New(progressreader.Config{
						In:        layer,
						Out:       out,
						Formatter: p.sf,
						Size:      imgSize,
						NewLines:  false,
						ID:        stringid.TruncateID(id),
						Action:    "Downloading",
					}))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Error downloading dependent layers", nil))
					return layersDownloaded, err
				} else {
					break
				}
			}
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(id), "Download complete", nil))
	}
	return layersDownloaded, nil
}
Example #17
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)
	manifestBytes, err := r.GetV2ImageManifest(remoteName, tag, nil)
	if err != nil {
		return false, err
	}

	manifest, verified, err := s.verifyManifest(eng, manifestBytes)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if len(manifest.FSLayers) != len(manifest.History) {
		return false, fmt.Errorf("length of history not equal to number of layers")
	}

	if verified {
		out.Write(sf.FormatStatus(localName+":"+tag, "The image you are pulling has been verified"))
	} else {
		out.Write(sf.FormatStatus(tag, "Pulling from %s", localName))
	}

	if len(manifest.FSLayers) == 0 {
		return false, fmt.Errorf("no blobSums in manifest")
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		chunks := strings.SplitN(sumStr, ":", 2)
		if len(chunks) < 2 {
			return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
		}
		sumType, checksum := chunks[0], chunks[1]
		out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(remoteName, sumType, checksum, nil)
				if err != nil {
					return err
				}
				defer r.Close()
				io.Copy(tmpFile, utils.ProgressReader(r, int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading"))

				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var layersDownloaded bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img, d.imgJSON,
					utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting"))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil))
			layersDownloaded = true
		} else {
			out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	if err = s.Set(localName, tag, downloads[0].img.ID, true); err != nil {
		return false, err
	}

	return layersDownloaded, nil
}
Example #18
File: push.go Project: hantuo/docker
func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out io.Writer, repoInfo *registry.RepositoryInfo, manifestBytes, tag string, sf *utils.StreamFormatter) error {
	if repoInfo.Official {
		j := eng.Job("trust_update_base")
		if err := j.Run(); err != nil {
			log.Errorf("error updating trust base graph: %s", err)
		}
	}

	endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
	if err != nil {
		return fmt.Errorf("error getting registry endpoint: %s", err)
	}
	auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
	if err != nil {
		return fmt.Errorf("error getting authorization: %s", err)
	}

	// if no manifest is given, generate and sign with the key associated with the local tag store
	if len(manifestBytes) == 0 {
		mBytes, err := s.newManifest(repoInfo.LocalName, repoInfo.RemoteName, tag)
		if err != nil {
			return err
		}
		js, err := libtrust.NewJSONSignature(mBytes)
		if err != nil {
			return err
		}

		if err = js.Sign(s.trustKey); err != nil {
			return err
		}

		signedBody, err := js.PrettySignature("signatures")
		if err != nil {
			return err
		}
		log.Infof("Signed manifest using daemon's key: %s", s.trustKey.KeyID())

		manifestBytes = string(signedBody)
	}

	manifest, verified, err := s.verifyManifest(eng, []byte(manifestBytes))
	if err != nil {
		return fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return fmt.Errorf("invalid manifest: %s", err)
	}

	if !verified {
		log.Debugf("Pushing unverified image")
	}

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		sumParts := strings.SplitN(sumStr, ":", 2)
		if len(sumParts) < 2 {
			return fmt.Errorf("Invalid checksum: %s", sumStr)
		}
		manifestSum := sumParts[1]

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return fmt.Errorf("Failed to parse json: %s", err)
		}

		img, err = s.graph.Get(img.ID)
		if err != nil {
			return err
		}

		arch, err := img.TarLayer()
		if err != nil {
			return fmt.Errorf("Could not get tar layer: %s", err)
		}

		// Call mount blob
		exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
		if err != nil {
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
			return err
		}
		if !exists {
			err = r.PutV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, utils.ProgressReader(arch, int(img.Size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth)
			if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
				return err
			}
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil))
		} else {
			out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image already exists", nil))
		}
	}

	// push the manifest
	return r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth)
}
Example #19
File: pull.go Project: ifa6/docker-1
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)

	remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if verified {
		logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	// downloadInfo is used to pass information from download to extractor
	type downloadInfo struct {
		imgJSON    []byte
		img        *image.Image
		digest     digest.Digest
		tmpFile    *os.File
		length     int64
		downloaded bool
		err        chan error
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
				} else {
					logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
				if err != nil {
					return err
				}
				defer r.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        stringid.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					return fmt.Errorf("image layer digest verification failed for %q", di.digest)
				}

				out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

				logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		downloads[i].err = make(chan error)
		go func(di *downloadInfo) {
			di.err <- downloadFunc(di)
		}(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        stringid.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				if err := s.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if localDigest != remoteDigest { // this is not a verification check.
		// NOTE(stevvooe): This is a very defensive branch and should never
		// happen, since all manifest digest implementations use the same
		// algorithm.
		logrus.WithFields(
			logrus.Fields{
				"local":  localDigest,
				"remote": remoteDigest,
			}).Debugf("local digest does not match remote")

		out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
	}

	out.Write(sf.FormatStatus("", "Digest: %s", localDigest))

	if tag == localDigest.String() {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.

		if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
			return false, err
		}
	}

	if !utils.DigestReference(tag) {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
Example #20
File: push.go Project: mikroio/mikro-cli
func (ib *ImageBundle) GetLayer(address string) (*image.Image, error) {
	imageJSON, _ := ib.GetLayerJSON(address)
	return image.NewImgJSON(imageJSON)
}
Example #21
File: merge.go Project: hustcat/docker-1.3
func (s *TagStore) pullAndMergeImage(r *registry.Session, out io.Writer, containerID, containerImage, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
	newHistory, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return false, err
	}
	oldHistory, err := r.GetRemoteHistory(containerImage, endpoint, token)
	if err != nil {
		return false, err
	}
	// Compare the differences between the two image
	compareHistory := make(map[string]string, len(oldHistory))
	for _, id := range oldHistory {
		compareHistory[id] = id
	}
	var history []string
	for _, id := range newHistory {
		if _, ok := compareHistory[id]; !ok {
			history = append(history, id)
		}
	}

	layers_downloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
			log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer s.poolRemove("pull", "layer:"+id)

		out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
		var (
			imgJSON []byte
			imgSize int
			err     error
			img     *image.Image
		)
		retries := 5
		for j := 1; j <= retries; j++ {
			imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, err
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			}
			img, err = image.NewImgJSON(imgJSON)
			layers_downloaded = true
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else {
				break
			}
		}

		for j := 1; j <= retries; j++ {
			// Get the layer
			status := "Pulling fs layer"
			if j > 1 {
				status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
			}
			out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
			layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
			if uerr, ok := err.(*url.Error); ok {
				err = uerr.Err
			}
			if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else if err != nil {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
				return layers_downloaded, err
			}
			layers_downloaded = true
			defer layer.Close()
			if !s.graph.Exists(id) {
				// register when first pull layer
				err = s.graph.Register(img, imgJSON,
					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
					return layers_downloaded, err
				}
			}
			// add layer to container
			dest := path.Join(s.graph.Driver().MountPath(), containerID, "rootfs")
			out.Write(sf.FormatProgress(utils.TruncateID(id), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
			err = archive.ApplyLayer(dest, layer)
			if err != nil && j == retries {
				out.Write(sf.FormatProgress(utils.TruncateID(id), "Error merge layers", nil))
				return layers_downloaded, err
			} else if err != nil {
				time.Sleep(time.Duration(j) * 500 * time.Millisecond)
				continue
			} else {
				break
			}
		}
	}
	return layers_downloaded, nil
}
Example #22
func (fetcher *dockerMetadataFetcher) FetchMetadata(dockerPath string) (*ImageMetadata, error) {
	indexName, remoteName, tag, err := docker_repository_name_formatter.ParseRepoNameAndTagFromImageReference(dockerPath)
	if err != nil {
		return nil, err
	}

	var reposName string
	if len(indexName) > 0 {
		reposName = fmt.Sprintf("%s/%s", indexName, remoteName)
	} else {
		reposName = remoteName
	}

	var session DockerSession
	if session, err = fetcher.dockerSessionFactory.MakeSession(reposName, false); err != nil {
		if !strings.Contains(err.Error(), "this private registry supports only HTTP or HTTPS with an unknown CA certificate") {
			return nil, err
		}

		session, err = fetcher.dockerSessionFactory.MakeSession(reposName, true)
		if err != nil {
			return nil, err
		}
	}

	repoData, err := session.GetRepositoryData(remoteName)
	if err != nil {
		return nil, err
	}

	tagsList, err := session.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
	if err != nil {
		return nil, err
	}

	imgID, ok := tagsList[tag]
	if !ok {
		return nil, fmt.Errorf("Unknown tag: %s:%s", remoteName, tag)
	}

	var img *image.Image
	endpoint := repoData.Endpoints[0]
	imgJSON, _, err := session.GetRemoteImageJSON(imgID, endpoint, repoData.Tokens)
	if err != nil {
		return nil, err
	}

	img, err = image.NewImgJSON(imgJSON)
	if err != nil {
		return nil, fmt.Errorf("Error parsing remote image json for specified docker image:\n%s", err)
	}
	if img.Config == nil {
		return nil, fmt.Errorf("Parsing start command failed")
	}

	startCommand := append(img.Config.Entrypoint, img.Config.Cmd...)
	exposedPorts := sortPorts(img.ContainerConfig.ExposedPorts)

	return &ImageMetadata{
		WorkingDir:   img.Config.WorkingDir,
		User:         img.Config.User,
		StartCommand: startCommand,
		ExposedPorts: exposedPorts,
		Env:          img.Config.Env,
	}, nil
}
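The startCommand line above mirrors how Docker resolves a container's process: the image's Entrypoint, followed by its Cmd. In isolation, with startCommandFor as a hypothetical helper:

func startCommandFor(entrypoint, cmd []string) []string {
	// Copy first so the image's own Entrypoint slice is never aliased,
	// then append Cmd: the effective process is entrypoint + cmd in order,
	// e.g. ["/bin/sh", "-c"] + ["echo hello"] -> ["/bin/sh", "-c", "echo hello"].
	out := append([]string(nil), entrypoint...)
	return append(out, cmd...)
}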
Example #23
File: pull.go Project: juju2013/docker
func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
	history, err := r.GetRemoteHistory(imgID, endpoint, token)
	if err != nil {
		return false, err
	}
	out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
	// FIXME: Try to stream the images?
	// FIXME: Launch the getRemoteImage() in goroutines

	layers_downloaded := false
	for i := len(history) - 1; i >= 0; i-- {
		id := history[i]

		// ensure no two downloads of the same layer happen at the same time
		if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
			log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
			<-c
		}
		defer s.poolRemove("pull", "layer:"+id)

		if !s.graph.Exists(id) {
			out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
			var (
				imgJSON []byte
				imgSize int
				err     error
				img     *image.Image
			)
			retries := 5
			for j := 1; j <= retries; j++ {
				imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return layers_downloaded, err
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				}
				img, err = image.NewImgJSON(imgJSON)
				layers_downloaded = true
				if err != nil && j == retries {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return layers_downloaded, fmt.Errorf("Failed to parse json: %s", err)
				} else if err != nil {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else {
					break
				}
			}

			for j := 1; j <= retries; j++ {
				// Get the layer
				status := "Pulling fs layer"
				if j > 1 {
					status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
				}
				out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
				layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
				if uerr, ok := err.(*url.Error); ok {
					err = uerr.Err
				}
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
					return layers_downloaded, err
				}
				layers_downloaded = true
				defer layer.Close()

				err = s.graph.Register(img,
					utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
				if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
					time.Sleep(time.Duration(j) * 500 * time.Millisecond)
					continue
				} else if err != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
					return layers_downloaded, err
				} else {
					break
				}
			}
		}
		out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
	}
	return layers_downloaded, nil
}
Example #24
File: pull_v2.go Project: ro0gr/docker
func (p *v2Puller) pullV2Tag(tag, taggedName string) (bool, error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
	out := p.config.OutStream

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err := p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	layerIDs := []string{}
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		downloads[i].img = img
		downloads[i].digest = manifest.FSLayers[i].BlobSum

		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			continue
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		downloads[i].err = make(chan error)
		go p.download(&downloads[i])
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			if err := <-d.err; err != nil {
				return false, err
			}
		}
		if d.layer != nil {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)

				reader := progressreader.New(progressreader.Config{
					In:        d.tmpFile,
					Out:       out,
					Formatter: p.sf,
					Size:      int(d.size),
					NewLines:  false,
					ID:        stringid.TruncateID(d.img.ID),
					Action:    "Extracting",
				})

				err = p.graph.Register(d.img, reader)
				if err != nil {
					return false, err
				}

				if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
		}
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
Example #25
File: pull_v2.go Project: ch3lo/docker
func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error) {
	logrus.Debugf("Pulling tag from V2 registry: %q", tag)
	out := p.config.OutStream

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	manifest, err := manSvc.GetByTag(tag)
	if err != nil {
		return false, err
	}
	verified, err = p.validateManifest(manifest, tag)
	if err != nil {
		return false, err
	}
	if verified {
		logrus.Printf("Image manifest for %s has been verified", taggedName)
	}

	// By using a pipeWriter for each of the downloads to write their progress
	// to, we can avoid an issue where this function returns an error but
	// leaves behind running download goroutines. By splitting the writer
	// with a pipe, we can close the pipe if there is any error, consequently
	// causing each download to cancel due to an error writing to this pipe.
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		if _, err := io.Copy(out, pipeReader); err != nil {
			logrus.Errorf("error copying from layer download progress reader: %s", err)
		}
	}()
	defer func() {
		if err != nil {
			// All operations on the pipe are synchronous. This call will wait
			// until all current readers/writers are done using the pipe then
			// set the error. All successive reads/writes will return with this
			// error.
			pipeWriter.CloseWithError(errors.New("download canceled"))
		}
	}()

	out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	var layerIDs []string
	defer func() {
		p.graph.Release(p.sessionID, layerIDs...)
	}()

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		img, err := image.NewImgJSON([]byte(manifest.History[i].V1Compatibility))
		if err != nil {
			logrus.Debugf("error getting image v1 json: %v", err)
			return false, err
		}
		p.graph.Retain(p.sessionID, img.ID)
		layerIDs = append(layerIDs, img.ID)

		// Check if exists
		if p.graph.Exists(img.ID) {
			logrus.Debugf("Image already exists: %s", img.ID)
			out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
			continue
		}
		out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

		d := &downloadInfo{
			img:    img,
			digest: manifest.FSLayers[i].BlobSum,
			// TODO: a buffered channel works around a hang seen on go1.5; this
			// may point to a deeper problem where the error is never read from
			// the channel in the loop below
			err: make(chan error, 1),
			out: pipeWriter,
		}
		downloads = append(downloads, d)

		go p.download(d)
	}

	// run clean for all downloads to prevent leftovers
	for _, d := range downloads {
		defer func(d *downloadInfo) {
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}(d)
	}

	var tagUpdated bool
	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}
		if d.layer == nil {
			continue
		}
		// d.layer is non-nil here, so the blob was spooled to tmpFile; rewind it before extracting
		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       out,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.img.ID),
			Action:    "Extracting",
		})

		err = p.graph.Register(d.img, reader)
		if err != nil {
			return false, err
		}

		if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
			return false, err
		}

		// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
		out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
		tagUpdated = true
	}

	manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := p.Get(p.repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		} else {
			tagUpdated = true
		}
	}

	if verified && tagUpdated {
		out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	// layerIDs was appended from the base layer up, so the last entry is the
	// leaf image that the tag or digest should point at
	firstID := layerIDs[len(layerIDs)-1]
	if utils.DigestReference(tag) {
		// TODO(stevvooe): Ideally, we should always set the digest so we can
		// use the digest whether we pull by it or not. Unfortunately, the tag
		// store treats the digest as a separate tag, meaning there may be an
		// untagged digest image that would seem to be dangling by a user.
		if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
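The comment block near the top of Example #25 deserves a closer look: progress from every download goroutine is routed through a single io.Pipe, so that closing the writer end with an error cancels all in-flight writers at once instead of leaking them. Here is a stripped-down, stdlib-only sketch of that pattern; the three workers and their payloads are invented for illustration.

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

// run fans three workers' progress into one pipe. If run returns an error,
// the deferred CloseWithError fails every pending pipe write, so no worker
// goroutine is left blocked, mirroring Example #25's cancellation comment.
func run() (err error) {
	pr, pw := io.Pipe()

	copyDone := make(chan struct{})
	go func() {
		defer close(copyDone)
		io.Copy(os.Stdout, pr) // single consumer drains progress until the pipe closes
	}()

	defer func() {
		if err != nil {
			pw.CloseWithError(errors.New("download canceled")) // unblocks all writers
		} else {
			pw.Close()
		}
		<-copyDone // wait for the consumer to finish
	}()

	errCh := make(chan error, 3)
	for i := 0; i < 3; i++ {
		go func(n int) {
			_, werr := fmt.Fprintf(pw, "worker %d: done\n", n)
			errCh <- werr
		}(i)
	}
	for i := 0; i < 3; i++ {
		if werr := <-errCh; werr != nil {
			return werr // triggers CloseWithError above
		}
	}
	return nil
}

func main() {
	if err := run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}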
Example #26
0
File: pull.go Project: juju2013/docker
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)
	manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	manifest, verified, err := s.loadManifest(eng, manifestBytes)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return false, err
	}

	if verified {
		log.Printf("Image manifest for %s:%s has been verified", repoInfo.CanonicalName, tag)
	} else {
		out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
	}

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		chunks := strings.SplitN(sumStr, ":", 2)
		if len(chunks) < 2 {
			return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
		}
		sumType, checksum := chunks[0], chunks[1]
		out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				blobReader, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, sumType, checksum, auth)
				if err != nil {
					return err
				}
				defer blobReader.Close()

				// Wrap the blob reader with the appropriate TarSum reader.
				tarSumReader, err := tarsum.NewTarSumForLabel(blobReader, true, sumType)
				if err != nil {
					return fmt.Errorf("unable to wrap image blob reader with TarSum: %s", err)
				}

				if _, err := io.Copy(tmpFile, utils.ProgressReader(ioutil.NopCloser(tarSumReader), int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Verifying Checksum", nil))

				if finalChecksum := tarSumReader.Sum(nil); !strings.EqualFold(finalChecksum, sumStr) {
					return fmt.Errorf("image verification failed: checksum mismatch - expected %q but got %q", sumStr, finalChecksum)
				}

				out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var layersDownloaded bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting"))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil))
			layersDownloaded = true
		} else {
			out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	if verified {
		out.Write(sf.FormatStatus(repoInfo.CanonicalName+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
		return false, err
	}

	return layersDownloaded, nil
}
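Example #26 verifies each layer by wrapping the blob in a TarSum reader, spooling it to a temp file, and only afterwards comparing Sum(nil) against the manifest's BlobSum. The spool-then-compare shape can be shown with nothing but the standard library; in the sketch below a plain SHA-256 hash stands in for the tarsum package, and the function names are illustrative.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// fetchAndVerify spools r into a temp file while hashing it, then compares
// the digest against expected ("sha256:<hex>"). SHA-256 stands in for the
// tarsum of Example #26; the copy-then-compare shape is the same.
func fetchAndVerify(r io.Reader, expected string) (*os.File, error) {
	parts := strings.SplitN(expected, ":", 2)
	if len(parts) < 2 {
		return nil, fmt.Errorf("expected 2 parts in the checksum, got %q", expected)
	}
	if parts[0] != "sha256" {
		return nil, fmt.Errorf("unsupported checksum type %q", parts[0])
	}

	tmp, err := os.CreateTemp("", "blob")
	if err != nil {
		return nil, err
	}

	h := sha256.New()
	if _, err := io.Copy(tmp, io.TeeReader(r, h)); err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, err
	}
	if got := hex.EncodeToString(h.Sum(nil)); !strings.EqualFold(got, parts[1]) {
		tmp.Close()
		os.Remove(tmp.Name())
		return nil, fmt.Errorf("checksum mismatch: expected %q, got %q", parts[1], got)
	}
	return tmp, nil
}

func main() {
	payload := "layer bytes"
	sum := sha256.Sum256([]byte(payload))
	f, err := fetchAndVerify(strings.NewReader(payload), "sha256:"+hex.EncodeToString(sum[:]))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("verified blob spooled to", f.Name())
}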
Example #27
0
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
	log.Debugf("Pulling tag from V2 registry: %q", tag)

	manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
	if err != nil {
		return false, err
	}

	// loadManifest ensures that the manifest payload has the expected digest
	// if the tag is a digest reference.
	manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
	if err != nil {
		return false, fmt.Errorf("error verifying manifest: %s", err)
	}

	if err := checkValidManifest(manifest); err != nil {
		return false, err
	}

	if verified {
		log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
	}
	out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

	downloads := make([]downloadInfo, len(manifest.FSLayers))

	for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
		var (
			sumStr  = manifest.FSLayers[i].BlobSum
			imgJSON = []byte(manifest.History[i].V1Compatibility)
		)

		img, err := image.NewImgJSON(imgJSON)
		if err != nil {
			return false, fmt.Errorf("failed to parse json: %s", err)
		}
		downloads[i].img = img

		// Check if exists
		if s.graph.Exists(img.ID) {
			log.Debugf("Image already exists: %s", img.ID)
			continue
		}

		dgst, err := digest.ParseDigest(sumStr)
		if err != nil {
			return false, err
		}
		downloads[i].digest = dgst

		out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))

		downloadFunc := func(di *downloadInfo) error {
			log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

			if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
				if c != nil {
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
					<-c
					out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
				} else {
					log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
				}
			} else {
				defer s.poolRemove("pull", "img:"+img.ID)
				tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
				if err != nil {
					return err
				}

				blobReader, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
				if err != nil {
					return err
				}
				defer blobReader.Close()

				verifier, err := digest.NewDigestVerifier(di.digest)
				if err != nil {
					return err
				}

				if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
					In:        ioutil.NopCloser(io.TeeReader(blobReader, verifier)),
					Out:       out,
					Formatter: sf,
					Size:      int(l),
					NewLines:  false,
					ID:        common.TruncateID(img.ID),
					Action:    "Downloading",
				})); err != nil {
					return fmt.Errorf("unable to copy v2 image blob data: %s", err)
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))

				if !verifier.Verified() {
					log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
					verified = false
				}

				out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))

				log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())
				di.tmpFile = tmpFile
				di.length = l
				di.downloaded = true
			}
			di.imgJSON = imgJSON

			return nil
		}

		if parallel {
			downloads[i].err = make(chan error)
			go func(di *downloadInfo) {
				di.err <- downloadFunc(di)
			}(&downloads[i])
		} else {
			err := downloadFunc(&downloads[i])
			if err != nil {
				return false, err
			}
		}
	}

	var tagUpdated bool
	for i := len(downloads) - 1; i >= 0; i-- {
		d := &downloads[i]
		if d.err != nil {
			err := <-d.err
			if err != nil {
				return false, err
			}
		}
		if d.downloaded {
			// if tmpFile is nil, assume the layer was downloaded and extracted elsewhere
			if d.tmpFile != nil {
				defer os.Remove(d.tmpFile.Name())
				defer d.tmpFile.Close()
				d.tmpFile.Seek(0, 0)
				err = s.graph.Register(d.img,
					progressreader.New(progressreader.Config{
						In:        d.tmpFile,
						Out:       out,
						Formatter: sf,
						Size:      int(d.length),
						ID:        common.TruncateID(d.img.ID),
						Action:    "Extracting",
					}))
				if err != nil {
					return false, err
				}

				// FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
			}
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
			tagUpdated = true
		} else {
			out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
		}

	}

	// Check for new tag if no layers downloaded
	if !tagUpdated {
		repo, err := s.Get(repoInfo.LocalName)
		if err != nil {
			return false, err
		}
		if repo != nil {
			if _, exists := repo[tag]; !exists {
				tagUpdated = true
			}
		}
	}

	if verified && tagUpdated {
		out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
	}

	if manifestDigest != "" {
		out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	if utils.DigestReference(tag) {
		if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
			return false, err
		}
	} else {
		// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
		if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
			return false, err
		}
	}

	return tagUpdated, nil
}
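Example #27 replaces the tarsum comparison with the distribution digest package: the blob is teed into a digest verifier while it is copied, and Verified() is consulted afterwards. The sketch below shows the same TeeReader pattern using github.com/opencontainers/go-digest, the successor to the package used here; it is an assumption about your environment, not the exact API of that Docker era.

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// copyVerified streams r into w while teeing the bytes into a digest
// verifier, then reports a mismatch the way Example #27 does after its
// io.Copy. Assumes the go-digest successor package, not the exact
// docker/distribution digest API shown above.
func copyVerified(w io.Writer, r io.Reader, expected string) error {
	dgst, err := digest.Parse(expected) // e.g. "sha256:<hex>"
	if err != nil {
		return err
	}
	verifier := dgst.Verifier() // an io.Writer accumulating the hash

	if _, err := io.Copy(w, io.TeeReader(r, verifier)); err != nil {
		return fmt.Errorf("unable to copy blob data: %s", err)
	}
	if !verifier.Verified() {
		return fmt.Errorf("checksum mismatch for %q", dgst.String())
	}
	return nil
}

func main() {
	payload := "layer bytes"
	want := digest.FromString(payload) // reference digest for the test payload
	if err := copyVerified(io.Discard, strings.NewReader(payload), want.String()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("blob verified:", want)
}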