Example 1
File: pull.go Project: devick/flynn
func (s *TagStore) CmdPull(job *engine.Job) error {
	if n := len(job.Args); n != 1 && n != 2 {
		return fmt.Errorf("Usage: %s IMAGE [TAG|DIGEST]", job.Name)
	}

	var (
		localName   = job.Args[0]
		tag         string
		sf          = utils.NewStreamFormatter(job.GetenvBool("json"))
		authConfig  = &registry.AuthConfig{}
		metaHeaders map[string][]string
	)

	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := registry.ResolveRepositoryInfo(job, localName)
	if err != nil {
		return err
	}

	if len(job.Args) > 1 {
		tag = job.Args[1]
	}

	// Optional client-supplied credentials and extra HTTP headers; errors
	// from GetenvJson are ignored, leaving the zero values in place.
	job.GetenvJson("authConfig", authConfig)
	job.GetenvJson("metaHeaders", &metaHeaders)

	c, err := s.poolAdd("pull", utils.ImageReference(repoInfo.LocalName, tag))
	if err != nil {
		if c != nil {
			// Another pull of the same repository is already taking place; just wait for it to finish
			job.Stdout.Write(sf.FormatStatus("", "Repository %s already being pulled by another client. Waiting.", repoInfo.LocalName))
			<-c
			return nil
		}
		return err
	}
	defer s.poolRemove("pull", utils.ImageReference(repoInfo.LocalName, tag))

	log.Debugf("pulling image from host %q with remote name %q", repoInfo.Index.Name, repoInfo.RemoteName)
	endpoint, err := repoInfo.GetEndpoint()
	if err != nil {
		return err
	}

	r, err := registry.NewSession(authConfig, registry.HTTPRequestFactory(metaHeaders), endpoint, true)
	if err != nil {
		return err
	}

	logName := repoInfo.LocalName
	if tag != "" {
		logName = utils.ImageReference(logName, tag)
	}

	// Try the v2 code path first when no mirrors are configured and the
	// index is official or the endpoint speaks the v2 API; on failure the
	// code falls back to the v1 pull below.
	if len(repoInfo.Index.Mirrors) == 0 && (repoInfo.Index.Official || endpoint.Version == registry.APIVersion2) {
		if repoInfo.Official {
			// Refresh the trust base graph before pulling an official image.
			j := job.Eng.Job("trust_update_base")
			if err = j.Run(); err != nil {
				log.Errorf("error updating trust base graph: %s", err)
			}
		}

		log.Debugf("pulling v2 repository with local name %q", repoInfo.LocalName)
		if err := s.pullV2Repository(job.Eng, r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err == nil {
			if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
				log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
			}
			return nil
		} else if err != registry.ErrDoesNotExist && err != ErrV2RegistryUnavailable {
			log.Errorf("Error from V2 registry: %s", err)
		}

		log.Debug("image does not exist on v2 registry, falling back to v1")
	}

	log.Debugf("pulling v1 repository with local name %q", repoInfo.LocalName)
	if err = s.pullRepository(r, job.Stdout, repoInfo, tag, sf, job.GetenvBool("parallel")); err != nil {
		return err
	}

	if err = job.Eng.Job("log", "pull", logName, "").Run(); err != nil {
		log.Errorf("Error logging event 'pull' for %s: %s", logName, err)
	}

	return nil
}
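
The poolAdd/poolRemove pair above de-duplicates concurrent pulls of the same reference: the first caller registers a channel under the pull key, and later callers get that channel back together with an error, then block on <-c until the owner closes it on completion. Below is a minimal sketch of that pattern; the pool type, its field names, and the error text are illustrative assumptions, not the actual TagStore implementation.

package pullpool

import (
	"fmt"
	"sync"
)

// pool tracks in-flight operations by key (a hypothetical stand-in for the
// TagStore's internal pull pool).
type pool struct {
	mu     sync.Mutex
	active map[string]chan struct{}
}

func newPool() *pool {
	return &pool{active: make(map[string]chan struct{})}
}

// add registers key as in-flight. If key is already active, it returns the
// existing channel together with a non-nil error, so the caller can wait on
// the channel instead of starting duplicate work.
func (p *pool) add(key string) (chan struct{}, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.active[key]; ok {
		return c, fmt.Errorf("%s is already in progress", key)
	}
	c := make(chan struct{})
	p.active[key] = c
	return c, nil
}

// remove closes the key's channel, waking every goroutine blocked on <-c,
// and forgets the key.
func (p *pool) remove(key string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if c, ok := p.active[key]; ok {
		close(c)
		delete(p.active, key)
	}
}

This is why the waiter in CmdPull can simply do <-c and then return nil: closing the channel broadcasts that the other pull has finished.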
Example 2
// attemptIDReuse makes a best-effort attempt to match verified compatibilityIDs
// already in the graph with the computed strongIDs so we can keep using them.
// This process never fails, but it may simply return the strongIDs if none of
// the compatibilityIDs exist or can be verified. If the strongIDs themselves
// fail verification, we deterministically generate alternate IDs to use until
// we find one that's available or already exists with the correct data.
func (p *v2Puller) attemptIDReuse(imgs []contentAddressableDescriptor) {
	// This function needs to be protected with a global lock, because it
	// locks multiple IDs at once, and there's no good way to make sure
	// the locking happens in a deterministic order.
	p.graph.imagesMutex.Lock()
	defer p.graph.imagesMutex.Unlock()

	// Collect both the strong IDs and the compatibility IDs so that all of
	// them can be locked together below.
	idMap := make(map[string]struct{})
	for _, img := range imgs {
		idMap[img.id] = struct{}{}
		idMap[img.compatibilityID] = struct{}{}

		if p.graph.Exists(img.compatibilityID) {
			if _, err := p.graph.GenerateV1CompatibilityChain(img.compatibilityID); err != nil {
				logrus.Debugf("Migration v1Compatibility generation error: %v", err)
				return
			}
		}
	}
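	// Lock every collected ID for the duration of this function; the
	// deferred unlocks intentionally run at function exit rather than per
	// iteration.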
	for id := range idMap {
		p.graph.imageMutex.Lock(id)
		defer p.graph.imageMutex.Unlock(id)
	}

	// continueReuse controls whether the function will try to find
	// existing layers on disk under the old v1 IDs, to avoid repulling
	// them. The hashes are checked to ensure these layers are okay to
	// use. continueReuse starts out as true, but is set to false if
	// the code encounters something that doesn't match the expected hash.
	continueReuse := true

	for i := len(imgs) - 1; i >= 0; i-- {
		if p.graph.Exists(imgs[i].id) {
			// Found an image in the graph under the strongID. Validate the
			// image before using it.
			if err := p.validateImageInGraph(imgs[i].id, imgs, i); err != nil {
				continueReuse = false
				logrus.Debugf("not using existing strongID: %v", err)

				// The strong ID exists in the graph but failed
				// validation, so it can't be used. Treat the graph
				// like a hash table with probing: compute SHA256(id)
				// repeatedly until we find an ID that either doesn't
				// already exist in the graph, or has existing content
				// that validates successfully.
				for {
					if err := p.tryNextID(imgs, i, idMap); err != nil {
						logrus.Debug(err.Error())
					} else {
						break
					}
				}
			}
			continue
		}

		if continueReuse {
			compatibilityID := imgs[i].compatibilityID
			if err := p.validateImageInGraph(compatibilityID, imgs, i); err != nil {
				logrus.Debugf("stopping ID reuse: %v", err)
				continueReuse = false
			} else {
				// The compatibility ID exists in the graph and was
				// validated. Use it.
				imgs[i].id = compatibilityID
			}
		}
	}

	// Fix up parent links: imgs is ordered from top layer to base, so each
	// image's parent is the next entry, and the last entry (the base layer)
	// has no parent.
	for i := 0; i < len(imgs); i++ {
		if i == len(imgs)-1 { // Base layer
			imgs[i].parent = ""
		} else {
			imgs[i].parent = imgs[i+1].id
		}
	}
}
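
The probing comment inside attemptIDReuse describes deriving successive candidate IDs by hashing the current one. Below is a minimal sketch of a single probe step, assuming hex-encoded IDs; nextCandidateID is a hypothetical helper name, and the actual derivation inside tryNextID may differ.

package idreuse

import (
	"crypto/sha256"
	"encoding/hex"
)

// nextCandidateID deterministically derives the next candidate ID from the
// current one by hashing it, mirroring the "hash table with probing" idea:
// callers keep probing until an ID is free or its existing content validates.
func nextCandidateID(id string) string {
	sum := sha256.Sum256([]byte(id))
	return hex.EncodeToString(sum[:])
}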