func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) error {
    var layersDownloaded bool
    if tag == "" {
        log.Debugf("Pulling tag list from V2 registry for %s", remoteName)
        tags, err := r.GetV2RemoteTags(remoteName, nil)
        if err != nil {
            return err
        }
        for _, t := range tags {
            if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, t, sf, parallel); err != nil {
                return err
            } else if downloaded {
                layersDownloaded = true
            }
        }
    } else {
        if downloaded, err := s.pullV2Tag(eng, r, out, localName, remoteName, tag, sf, parallel); err != nil {
            return err
        } else if downloaded {
            layersDownloaded = true
        }
    }

    requestedTag := localName
    if len(tag) > 0 {
        requestedTag = localName + ":" + tag
    }
    WriteStatus(requestedTag, out, sf, layersDownloaded)
    return nil
}
// pushV2Image pushes the image content to the v2 registry, first buffering the contents to disk.
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName, sumType, sumStr string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) error {
    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Buffering to Disk", nil))

    image, err := s.graph.Get(img.ID)
    if err != nil {
        return err
    }
    arch, err := image.TarLayer()
    if err != nil {
        return err
    }
    defer arch.Close()

    tf, err := s.graph.newTempFile()
    if err != nil {
        return err
    }
    defer func() {
        tf.Close()
        os.Remove(tf.Name())
    }()

    size, err := bufferToFile(tf, arch)
    if err != nil {
        return err
    }

    // Send the layer
    log.Debugf("rendered layer for %s of [%d] size", img.ID, size)

    if err := r.PutV2ImageBlob(endpoint, imageName, sumType, sumStr,
        utils.ProgressReader(tf, int(size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth); err != nil {
        out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
        return err
    }
    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil))
    return nil
}
func (s *TagStore) pushImageToEndpoint(endpoint string, out io.Writer, remoteName string, imageIDs []string, tags map[string][]string, repo *registry.RepositoryData, sf *utils.StreamFormatter, r *registry.Session) error {
    workerCount := len(imageIDs)
    // start a maximum of 5 workers to check if images exist on the specified endpoint.
    if workerCount > 5 {
        workerCount = 5
    }
    var (
        wg           = &sync.WaitGroup{}
        imageData    = make(chan imagePushData, workerCount*2)
        imagesToPush = make(chan string, workerCount*2)
        pushes       = make(chan map[string]struct{}, 1)
    )
    for i := 0; i < workerCount; i++ {
        wg.Add(1)
        go lookupImageOnEndpoint(wg, r, out, sf, imageData, imagesToPush)
    }
    // start a goroutine that consumes the images to push
    go func() {
        shouldPush := make(map[string]struct{})
        for id := range imagesToPush {
            shouldPush[id] = struct{}{}
        }
        pushes <- shouldPush
    }()
    for _, id := range imageIDs {
        imageData <- imagePushData{
            id:       id,
            endpoint: endpoint,
            tokens:   repo.Tokens,
        }
    }
    // close the channel to notify the workers that there will be no more images to check.
    close(imageData)
    wg.Wait()
    close(imagesToPush)
    // wait for all the images that require pushes to be collected into a consumable map.
    shouldPush := <-pushes
    // finish by pushing any images and tags to the endpoint. The order in which the images
    // are pushed is very important, which is why we are still iterating over the ordered
    // list of imageIDs.
    for _, id := range imageIDs {
        if _, push := shouldPush[id]; push {
            if _, err := s.pushImage(r, out, id, endpoint, repo.Tokens, sf); err != nil {
                // FIXME: Continue on error?
                return err
            }
        }
        for _, tag := range tags[id] {
            out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", common.TruncateID(id), endpoint+"repositories/"+remoteName+"/tags/"+tag))
            if err := r.PushRegistryTag(remoteName, id, tag, endpoint, repo.Tokens); err != nil {
                return err
            }
        }
    }
    return nil
}
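// A minimal, self-contained sketch of the bounded fan-out/fan-in pattern used by
// pushImageToEndpoint above: N workers drain a job channel, a single collector
// goroutine gathers the results into a set, and close() signals each stage's
// completion. Names here (checkExists, jobs, missing) are illustrative stand-ins,
// not part of the surrounding code.
package main

import (
    "fmt"
    "sync"
)

func main() {
    ids := []string{"a", "b", "c", "d"}
    workerCount := 5
    if len(ids) < workerCount {
        workerCount = len(ids)
    }

    jobs := make(chan string, workerCount*2)
    missing := make(chan string, workerCount*2)
    result := make(chan map[string]struct{}, 1)

    var wg sync.WaitGroup
    for i := 0; i < workerCount; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for id := range jobs {
                if !checkExists(id) {
                    missing <- id // absent on the endpoint: needs pushing
                }
            }
        }()
    }

    // Collector: runs until the missing channel is closed, then publishes the set.
    go func() {
        set := make(map[string]struct{})
        for id := range missing {
            set[id] = struct{}{}
        }
        result <- set
    }()

    for _, id := range ids {
        jobs <- id
    }
    close(jobs)    // no more work: lets the workers exit
    wg.Wait()      // all workers done: safe to close missing
    close(missing) // lets the collector publish its set

    fmt.Println(<-result)
}

// checkExists stands in for a remote existence probe (e.g. LookupRemoteImage).
func checkExists(id string) bool { return id == "a" }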
// lookupImageOnEndpoint checks the specified endpoint to see if an image exists
// and if it is absent then it sends the image id to the channel to be pushed.
func lookupImageOnEndpoint(wg *sync.WaitGroup, r *registry.Session, out io.Writer, sf *utils.StreamFormatter, images chan imagePushData, imagesToPush chan string) {
    defer wg.Done()
    for image := range images {
        if err := r.LookupRemoteImage(image.id, image.endpoint, image.tokens); err != nil {
            log.Errorf("Error in LookupRemoteImage: %s", err)
            imagesToPush <- image.id
            continue
        }
        out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", common.TruncateID(image.id)))
    }
}
// pushV2Image pushes the image content to the v2 registry, first buffering the contents to disk.
func (s *TagStore) pushV2Image(r *registry.Session, img *image.Image, endpoint *registry.Endpoint, imageName string, sf *utils.StreamFormatter, out io.Writer, auth *registry.RequestAuthorization) (string, error) {
    out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Buffering to Disk", nil))

    image, err := s.graph.Get(img.ID)
    if err != nil {
        return "", err
    }
    arch, err := image.TarLayer()
    if err != nil {
        return "", err
    }
    defer arch.Close()

    tf, err := s.graph.newTempFile()
    if err != nil {
        return "", err
    }
    defer func() {
        tf.Close()
        os.Remove(tf.Name())
    }()

    h := sha256.New()
    size, err := bufferToFile(tf, io.TeeReader(arch, h))
    if err != nil {
        return "", err
    }
    dgst := digest.NewDigest("sha256", h)

    // Send the layer
    log.Debugf("rendered layer for %s of [%d] size", img.ID, size)

    if err := r.PutV2ImageBlob(endpoint, imageName, dgst.Algorithm(), dgst.Hex(),
        progressreader.New(progressreader.Config{
            In:        tf,
            Out:       out,
            Formatter: sf,
            Size:      int(size),
            NewLines:  false,
            ID:        common.TruncateID(img.ID),
            Action:    "Pushing",
        }), auth); err != nil {
        out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
        return "", err
    }
    out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image successfully pushed", nil))
    return dgst.String(), nil
}
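// A minimal sketch of the buffer-and-hash trick above: io.TeeReader feeds every
// byte read from the source into a sha256 hash while the copy buffers the stream
// to a temp file, so the digest is ready the moment buffering finishes. Only
// standard-library APIs are used; the payload and file names are illustrative.
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "os"
    "strings"
)

func main() {
    src := strings.NewReader("layer bytes go here")

    tf, err := os.CreateTemp("", "blob-")
    if err != nil {
        panic(err)
    }
    defer os.Remove(tf.Name())
    defer tf.Close()

    h := sha256.New()
    // Every byte copied to tf is also written into h.
    size, err := io.Copy(tf, io.TeeReader(src, h))
    if err != nil {
        panic(err)
    }

    fmt.Printf("buffered %d bytes, sha256:%s\n", size, hex.EncodeToString(h.Sum(nil)))
}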
func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) error {
    if tag == "" {
        log.Debugf("Pulling tag list from V2 registry for %s", remoteName)
        tags, err := r.GetV2RemoteTags(remoteName, nil)
        if err != nil {
            return err
        }
        for _, t := range tags {
            if err := s.pullV2Tag(eng, r, out, localName, remoteName, t, sf, parallel); err != nil {
                return err
            }
        }
    } else {
        if err := s.pullV2Tag(eng, r, out, localName, remoteName, tag, sf, parallel); err != nil {
            return err
        }
    }
    return nil
}
// pushRepository pushes layers that do not already exist on the registry.
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
    log.Debugf("Local repo: %s", localRepo)
    out = utils.NewWriteFlusher(out)
    imgList, tags, err := s.getImageList(localRepo, tag)
    if err != nil {
        return err
    }
    out.Write(sf.FormatStatus("", "Sending image list"))

    imageIndex := s.createImageIndex(imgList, tags)
    log.Debugf("Preparing to push %s with the following images and tags", localRepo)
    for _, data := range imageIndex {
        log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
    }

    // Register all the images in a repository with the registry.
    // If an image is not in this list it will not be associated with the repository.
    repoData, err := r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, false, nil)
    if err != nil {
        return err
    }

    nTag := 1
    if tag == "" {
        nTag = len(localRepo)
    }
    out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", repoInfo.CanonicalName, nTag))

    // push the repository to each of the endpoints only if it does not exist.
    for _, endpoint := range repoData.Endpoints {
        if err := s.pushImageToEndpoint(endpoint, out, repoInfo.RemoteName, imgList, tags, repoData, sf, r); err != nil {
            return err
        }
    }
    _, err = r.PushImageJSONIndex(repoInfo.RemoteName, imageIndex, true, repoData.Endpoints)
    return err
}
func (s *TagStore) pushImage(r *registry.Session, out io.Writer, imgID, ep string, token []string, sf *utils.StreamFormatter) (checksum string, err error) {
    out = utils.NewWriteFlusher(out)
    jsonRaw, err := ioutil.ReadFile(path.Join(s.graph.Root, imgID, "json"))
    if err != nil {
        return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
    }
    out.Write(sf.FormatProgress(common.TruncateID(imgID), "Pushing", nil))

    imgData := &registry.ImgData{
        ID: imgID,
    }

    // Send the json
    if err := r.PushImageJSONRegistry(imgData, jsonRaw, ep, token); err != nil {
        if err == registry.ErrAlreadyExists {
            out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image already pushed, skipping", nil))
            return "", nil
        }
        return "", err
    }

    layerData, err := s.graph.TempLayerArchive(imgID, sf, out)
    if err != nil {
        return "", fmt.Errorf("Failed to generate layer archive: %s", err)
    }
    defer os.RemoveAll(layerData.Name())

    // Send the layer
    log.Debugf("rendered layer for %s of [%d] size", imgData.ID, layerData.Size)

    checksum, checksumPayload, err := r.PushImageLayerRegistry(imgData.ID,
        progressreader.New(progressreader.Config{
            In:        layerData,
            Out:       out,
            Formatter: sf,
            Size:      int(layerData.Size),
            NewLines:  false,
            ID:        common.TruncateID(imgData.ID),
            Action:    "Pushing",
        }), ep, token, jsonRaw)
    if err != nil {
        return "", err
    }
    imgData.Checksum = checksum
    imgData.ChecksumPayload = checksumPayload
    // Send the checksum
    if err := r.PushImageChecksumRegistry(imgData, ep, token); err != nil {
        return "", err
    }

    out.Write(sf.FormatProgress(common.TruncateID(imgData.ID), "Image successfully pushed", nil))
    return imgData.Checksum, nil
}
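// A minimal sketch of the progress-reader idea used with progressreader.New
// above: wrap an io.Reader, count bytes as they pass through, and report
// progress to a writer. This is a generic stand-in for illustration, not the
// progressreader package's actual implementation.
package main

import (
    "fmt"
    "io"
    "os"
    "strings"
)

type progressReader struct {
    r     io.Reader
    out   io.Writer
    total int64
    read  int64
}

// Read forwards to the wrapped reader and emits a progress line per chunk.
func (p *progressReader) Read(buf []byte) (int, error) {
    n, err := p.r.Read(buf)
    p.read += int64(n)
    fmt.Fprintf(p.out, "progress: %d/%d bytes\n", p.read, p.total)
    return n, err
}

func main() {
    src := strings.NewReader("some layer data")
    pr := &progressReader{r: src, out: os.Stderr, total: int64(src.Len())}
    if _, err := io.Copy(io.Discard, pr); err != nil {
        panic(err)
    }
}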
func (s *TagStore) pullV2Repository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter) error {
    endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
    if err != nil {
        if repoInfo.Index.Official {
            logrus.Debugf("Unable to pull from V2 registry, falling back to v1: %s", err)
            return ErrV2RegistryUnavailable
        }
        return fmt.Errorf("error getting registry endpoint: %s", err)
    }
    auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, true)
    if err != nil {
        return fmt.Errorf("error getting authorization: %s", err)
    }
    if !auth.CanAuthorizeV2() {
        return ErrV2RegistryUnavailable
    }

    var layersDownloaded bool
    if tag == "" {
        logrus.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName)
        tags, err := r.GetV2RemoteTags(endpoint, repoInfo.RemoteName, auth)
        if err != nil {
            return err
        }
        if len(tags) == 0 {
            return registry.ErrDoesNotExist
        }
        for _, t := range tags {
            if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, t, sf, auth); err != nil {
                return err
            } else if downloaded {
                layersDownloaded = true
            }
        }
    } else {
        if downloaded, err := s.pullV2Tag(r, out, endpoint, repoInfo, tag, sf, auth); err != nil {
            return err
        } else if downloaded {
            layersDownloaded = true
        }
    }

    requestedTag := repoInfo.CanonicalName
    if len(tag) > 0 {
        requestedTag = utils.ImageReference(repoInfo.CanonicalName, tag)
    }
    WriteStatus(requestedTag, out, sf, layersDownloaded)
    return nil
}
func (s *TagStore) pullV2Repository(eng *engine.Engine, r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool) error {
    endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
    if err != nil {
        return fmt.Errorf("error getting registry endpoint: %s", err)
    }
    auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, true)
    if err != nil {
        return fmt.Errorf("error getting authorization: %s", err)
    }

    var layersDownloaded bool
    if tag == "" {
        log.Debugf("Pulling tag list from V2 registry for %s", repoInfo.CanonicalName)
        tags, err := r.GetV2RemoteTags(endpoint, repoInfo.RemoteName, auth)
        if err != nil {
            return err
        }
        if len(tags) == 0 {
            return registry.ErrDoesNotExist
        }
        for _, t := range tags {
            if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, t, sf, parallel, auth); err != nil {
                return err
            } else if downloaded {
                layersDownloaded = true
            }
        }
    } else {
        if downloaded, err := s.pullV2Tag(eng, r, out, endpoint, repoInfo, tag, sf, parallel, auth); err != nil {
            return err
        } else if downloaded {
            layersDownloaded = true
        }
    }

    requestedTag := repoInfo.CanonicalName
    if len(tag) > 0 {
        requestedTag = repoInfo.CanonicalName + ":" + tag
    }
    WriteStatus(requestedTag, out, sf, layersDownloaded)
    return nil
}
func (s *TagStore) pushRepository(r *registry.Session, out io.Writer, localName, remoteName string, localRepo map[string]string, tag string, sf *utils.StreamFormatter) error {
    out = utils.NewWriteFlusher(out)
    log.Debugf("Local repo: %s", localRepo)
    imgList, tagsByImage, err := s.getImageList(localRepo, tag)
    if err != nil {
        return err
    }

    out.Write(sf.FormatStatus("", "Sending image list"))

    var (
        repoData   *registry.RepositoryData
        imageIndex []*registry.ImgData
    )

    for _, imgID := range imgList {
        if tags, exists := tagsByImage[imgID]; exists {
            // If an image has tags you must add an entry in the image index
            // for each tag
            for _, tag := range tags {
                imageIndex = append(imageIndex, &registry.ImgData{
                    ID:  imgID,
                    Tag: tag,
                })
            }
        } else {
            // If the image does not have a tag it still needs to be sent to the
            // registry with an empty tag so that it is associated with the repository
            imageIndex = append(imageIndex, &registry.ImgData{
                ID:  imgID,
                Tag: "",
            })
        }
    }

    log.Debugf("Preparing to push %s with the following images and tags", localRepo)
    for _, data := range imageIndex {
        log.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag)
    }

    // Register all the images in a repository with the registry.
    // If an image is not in this list it will not be associated with the repository.
    repoData, err = r.PushImageJSONIndex(remoteName, imageIndex, false, nil)
    if err != nil {
        return err
    }

    nTag := 1
    if tag == "" {
        nTag = len(localRepo)
    }
    for _, ep := range repoData.Endpoints {
        out.Write(sf.FormatStatus("", "Pushing repository %s (%d tags)", localName, nTag))

        for _, imgID := range imgList {
            if r.LookupRemoteImage(imgID, ep, repoData.Tokens) {
                out.Write(sf.FormatStatus("", "Image %s already pushed, skipping", utils.TruncateID(imgID)))
            } else {
                if _, err := s.pushImage(r, out, remoteName, imgID, ep, repoData.Tokens, sf); err != nil {
                    // FIXME: Continue on error?
                    return err
                }
            }

            for _, tag := range tagsByImage[imgID] {
                out.Write(sf.FormatStatus("", "Pushing tag for rev [%s] on {%s}", utils.TruncateID(imgID), ep+"repositories/"+remoteName+"/tags/"+tag))

                if err := r.PushRegistryTag(remoteName, imgID, tag, ep, repoData.Tokens); err != nil {
                    return err
                }
            }
        }
    }

    if _, err := r.PushImageJSONIndex(remoteName, imageIndex, true, repoData.Endpoints); err != nil {
        return err
    }

    return nil
}
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, localName, remoteName, tag string, sf *utils.StreamFormatter, parallel bool) (bool, error) {
    log.Debugf("Pulling tag from V2 registry: %q", tag)
    manifestBytes, err := r.GetV2ImageManifest(remoteName, tag, nil)
    if err != nil {
        return false, err
    }

    manifest, verified, err := s.verifyManifest(eng, manifestBytes)
    if err != nil {
        return false, fmt.Errorf("error verifying manifest: %s", err)
    }

    if len(manifest.FSLayers) != len(manifest.History) {
        return false, fmt.Errorf("length of history not equal to number of layers")
    }

    if verified {
        out.Write(sf.FormatStatus(localName+":"+tag, "The image you are pulling has been verified"))
    } else {
        out.Write(sf.FormatStatus(tag, "Pulling from %s", localName))
    }

    if len(manifest.FSLayers) == 0 {
        return false, fmt.Errorf("no blobSums in manifest")
    }

    downloads := make([]downloadInfo, len(manifest.FSLayers))

    for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
        var (
            sumStr  = manifest.FSLayers[i].BlobSum
            imgJSON = []byte(manifest.History[i].V1Compatibility)
        )

        img, err := image.NewImgJSON(imgJSON)
        if err != nil {
            return false, fmt.Errorf("failed to parse json: %s", err)
        }
        downloads[i].img = img

        // Check if exists
        if s.graph.Exists(img.ID) {
            log.Debugf("Image already exists: %s", img.ID)
            continue
        }

        chunks := strings.SplitN(sumStr, ":", 2)
        if len(chunks) < 2 {
            return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
        }
        sumType, checksum := chunks[0], chunks[1]
        out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil))

        downloadFunc := func(di *downloadInfo) error {
            log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

            if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
                } else {
                    log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
            } else {
                defer s.poolRemove("pull", "img:"+img.ID)
                tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
                if err != nil {
                    return err
                }

                r, l, err := r.GetV2ImageBlobReader(remoteName, sumType, checksum, nil)
                if err != nil {
                    return err
                }
                defer r.Close()

                if _, err := io.Copy(tmpFile, utils.ProgressReader(r, int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")); err != nil {
                    return err
                }

                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

                log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())

                di.tmpFile = tmpFile
                di.length = l
                di.downloaded = true
            }
            di.imgJSON = imgJSON

            return nil
        }

        if parallel {
            downloads[i].err = make(chan error)
            go func(di *downloadInfo) {
                di.err <- downloadFunc(di)
            }(&downloads[i])
        } else {
            if err := downloadFunc(&downloads[i]); err != nil {
                return false, err
            }
        }
    }

    var layersDownloaded bool
    for i := len(downloads) - 1; i >= 0; i-- {
        d := &downloads[i]
        if d.err != nil {
            if err := <-d.err; err != nil {
                return false, err
            }
        }
        if d.downloaded {
            // if tmpFile is empty assume download and extracted elsewhere
            if d.tmpFile != nil {
                defer os.Remove(d.tmpFile.Name())
                defer d.tmpFile.Close()
                d.tmpFile.Seek(0, 0)
                err = s.graph.Register(d.img, d.imgJSON,
                    utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting"))
                if err != nil {
                    return false, err
                }
                // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
            }
            out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil))
            layersDownloaded = true
        } else {
            out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil))
        }
    }

    if err = s.Set(localName, tag, downloads[0].img.ID, true); err != nil {
        return false, err
    }

    return layersDownloaded, nil
}
func (s *TagStore) pullMRepository(r *registry.Session, out io.Writer, containerID, containerImage, localName, remoteName, askedTag string, sf *utils.StreamFormatter, parallel bool, mirrors []string) error {
    out.Write(sf.FormatStatus("", "Pulling (parallel:%v) repository %s:%s", parallel, localName, askedTag))
    if askedTag == "" {
        return fmt.Errorf("Error: tag is empty")
    }
    repoData, err := r.GetRepositoryData(remoteName)
    if err != nil {
        if strings.Contains(err.Error(), "HTTP code: 404") {
            return fmt.Errorf("Error: image %s not found", remoteName)
        }
        // Unexpected HTTP error
        return err
    }

    log.Debugf("Retrieving the tag list")
    tagsList, err := r.GetRemoteTags(repoData.Endpoints, remoteName, repoData.Tokens)
    if err != nil {
        log.Errorf("%v", err)
        return err
    }

    id, exists := tagsList[askedTag]
    if !exists {
        return fmt.Errorf("Tag %s not found in repository %s", askedTag, localName)
    }
    img, exists := repoData.ImgList[id]
    if !exists {
        return fmt.Errorf("Image ID %s not found in repository %s", id, localName)
    }
    img.Tag = askedTag

    var lastErr error
    success := false
    var isDownloaded bool
    if mirrors != nil {
        for _, ep := range mirrors {
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling and merging image (%s) from %s, mirror: %s", img.Tag, localName, ep), nil))
            if isDownloaded, err = s.pullAndMergeImage(r, out, containerID, containerImage, img.ID, ep, repoData.Tokens, sf); err != nil {
                // Don't report errors when pulling from mirrors.
                log.Debugf("Error pulling and merging image (%s) from %s, mirror: %s, %s", img.Tag, localName, ep, err)
                continue
            }
            success = true
            break
        }
    }
    if !success {
        for _, ep := range repoData.Endpoints {
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling and merging image (%s) from %s, endpoint: %s", img.Tag, localName, ep), nil))
            if isDownloaded, err = s.pullAndMergeImage(r, out, containerID, containerImage, img.ID, ep, repoData.Tokens, sf); err != nil {
                // It's not ideal that only the last error is returned, it would be better to concatenate the errors.
                // As the error is also given to the output stream the user will see the error.
                lastErr = err
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling and merging image (%s) from %s, endpoint: %s, %s", img.Tag, localName, ep, err), nil))
                continue
            }
            success = true
            break
        }
    }
    if !success {
        err := fmt.Errorf("Error pulling and merging image (%s) from %s, %v", img.Tag, localName, lastErr)
        out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
        return err
    }

    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Layer is downloaded? %v", isDownloaded), nil))
    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Complete", nil))

    if err = s.Set(localName, img.Tag, img.ID, true); err != nil {
        return err
    }
    return nil
}
func (s *TagStore) pullAndMergeImage(r *registry.Session, out io.Writer, containerID, containerImage, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
    newHistory, err := r.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return false, err
    }
    oldHistory, err := r.GetRemoteHistory(containerImage, endpoint, token)
    if err != nil {
        return false, err
    }
    // Compare the differences between the two images
    compareHistory := make(map[string]string, len(oldHistory))
    for _, id := range oldHistory {
        compareHistory[id] = id
    }
    var history []string
    for _, id := range newHistory {
        if _, ok := compareHistory[id]; !ok {
            history = append(history, id)
        }
    }

    layersDownloaded := false
    for i := len(history) - 1; i >= 0; i-- {
        id := history[i]

        // ensure no two downloads of the same layer happen at the same time
        if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
            log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
            if c != nil {
                <-c
            }
        }
        defer s.poolRemove("pull", "layer:"+id)

        out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
        var (
            imgJSON []byte
            imgSize int
            err     error
            img     *image.Image
        )
        retries := 5
        for j := 1; j <= retries; j++ {
            imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layersDownloaded, err
            } else if err != nil {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            }
            img, err = image.NewImgJSON(imgJSON)
            layersDownloaded = true
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
            } else if err != nil {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else {
                break
            }
        }

        for j := 1; j <= retries; j++ {
            // Get the layer
            status := "Pulling fs layer"
            if j > 1 {
                status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
            }
            out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
            layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
            if uerr, ok := err.(*url.Error); ok {
                err = uerr.Err
            }
            if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else if err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                return layersDownloaded, err
            }
            layersDownloaded = true
            defer layer.Close()
            if !s.graph.Exists(id) {
                // register the layer the first time it is pulled
                err = s.graph.Register(img, imgJSON,
                    utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
                    return layersDownloaded, err
                }
            }
            // add layer to container
            dest := path.Join(s.graph.Driver().MountPath(), containerID, "rootfs")
            out.Write(sf.FormatProgress(utils.TruncateID(id), fmt.Sprintf("Merge layer to container rootfs %s", dest), nil))
            err = archive.ApplyLayer(dest, layer)
            if err != nil && j == retries {
                out.Write(sf.FormatProgress(utils.TruncateID(id), "Error merging layers", nil))
                return layersDownloaded, err
            } else if err != nil {
                time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                continue
            } else {
                break
            }
        }
    }
    return layersDownloaded, nil
}
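// The retry loops above re-run a flaky network call up to five times, sleeping
// a linearly growing interval (j * 500ms) between failures. A minimal, generic
// sketch of that pattern, assuming nothing beyond the standard library:
package main

import (
    "errors"
    "fmt"
    "time"
)

// withRetries runs fn up to attempts times, sleeping attempt*delay between
// failures, and returns the last error if every attempt fails.
func withRetries(attempts int, delay time.Duration, fn func() error) error {
    var err error
    for j := 1; j <= attempts; j++ {
        if err = fn(); err == nil {
            return nil
        }
        if j < attempts {
            time.Sleep(time.Duration(j) * delay)
        }
    }
    return err
}

func main() {
    calls := 0
    err := withRetries(5, 500*time.Millisecond, func() error {
        calls++
        if calls < 3 {
            return errors.New("transient network error")
        }
        return nil
    })
    fmt.Println("calls:", calls, "err:", err)
}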
// TODO rewrite this whole PoC
func main() {
    flag.Usage = func() {
        flag.PrintDefaults()
    }
    flag.Parse()

    if debug {
        os.Setenv("DEBUG", "1")
        log.SetLevel(log.DebugLevel)
    }
    if flag.NArg() == 0 {
        fmt.Println("ERROR: no image names provided")
        flag.Usage()
        os.Exit(1)
    }

    // make tempDir
    tempDir, err := ioutil.TempDir("", "docker-fetch-")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer os.RemoveAll(tempDir)

    fetcher := NewFetcher(tempDir)
    sc := registry.NewServiceConfig(rOptions)

    for _, arg := range flag.Args() {
        remote, tagName := parsers.ParseRepositoryTag(arg)
        if tagName == "" {
            tagName = "latest"
        }

        repInfo, err := sc.NewRepositoryInfo(remote)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("%#v %q\n", repInfo, tagName)

        idx, err := repInfo.GetEndpoint()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Fprintf(os.Stderr, "Pulling %s:%s from %s\n", repInfo.RemoteName, tagName, idx)

        var session *registry.Session
        if s, ok := fetcher.sessions[idx.String()]; ok {
            session = s
        } else {
            // TODO(vbatts) obviously the auth and http factory shouldn't be nil here
            session, err = registry.NewSession(nil, nil, idx, timeout)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
        }

        rd, err := session.GetRepositoryData(repInfo.RemoteName)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("rd: %#v", rd)

        // produce the "repositories" file for the archive
        if _, ok := fetcher.repositories[repInfo.RemoteName]; !ok {
            fetcher.repositories[repInfo.RemoteName] = graph.Repository{}
        }
        log.Debugf("repositories: %#v", fetcher.repositories)

        if len(rd.Endpoints) == 0 {
            log.Fatalf("expected registry endpoints, but received none from the index")
        }

        tags, err := session.GetRemoteTags(rd.Endpoints, repInfo.RemoteName, rd.Tokens)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        if hash, ok := tags[tagName]; ok {
            fetcher.repositories[repInfo.RemoteName][tagName] = hash
        }
        log.Debugf("repositories: %#v", fetcher.repositories)

        imgList, err := session.GetRemoteHistory(fetcher.repositories[repInfo.RemoteName][tagName], rd.Endpoints[0], rd.Tokens)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        log.Debugf("imgList: %#v", imgList)

        for _, imgID := range imgList {
            // pull layers and jsons
            buf, _, err := session.GetRemoteImageJSON(imgID, rd.Endpoints[0], rd.Tokens)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            if err = os.MkdirAll(filepath.Join(fetcher.Root, imgID), 0755); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            fh, err := os.Create(filepath.Join(fetcher.Root, imgID, "json"))
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            if _, err = fh.Write(buf); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            fh.Close()
            log.Debugf("%s", fh.Name())

            tarRdr, err := session.GetRemoteImageLayer(imgID, rd.Endpoints[0], rd.Tokens, 0)
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            fh, err = os.Create(filepath.Join(fetcher.Root, imgID, "layer.tar"))
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            // the body is usually compressed
            gzRdr, err := gzip.NewReader(tarRdr)
            if err != nil {
                log.Debugf("image layer for %q is not gzipped", imgID)
                // the archive may not be gzipped, so just copy the stream
                if _, err = io.Copy(fh, tarRdr); err != nil {
                    fmt.Fprintln(os.Stderr, err)
                    os.Exit(1)
                }
            } else {
                // no error, so gzip decompress the stream
                if _, err = io.Copy(fh, gzRdr); err != nil {
                    fmt.Fprintln(os.Stderr, err)
                    os.Exit(1)
                }
                if err = gzRdr.Close(); err != nil {
                    fmt.Fprintln(os.Stderr, err)
                    os.Exit(1)
                }
            }
            if err = tarRdr.Close(); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            if err = fh.Close(); err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            log.Debugf("%s", fh.Name())
        }
    }

    // marshal the "repositories" file for writing out
    log.Debugf("repositories: %q", fetcher.repositories)
    buf, err := json.Marshal(fetcher.repositories)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fh, err := os.Create(filepath.Join(fetcher.Root, "repositories"))
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    if _, err = fh.Write(buf); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    fh.Close()
    log.Debugf("%s", fh.Name())

    var output io.WriteCloser
    if outputStream == "-" {
        output = os.Stdout
    } else {
        output, err = os.Create(outputStream)
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }
    defer output.Close()

    if err = os.Chdir(fetcher.Root); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    tarStream, err := archive.Tar(".", archive.Uncompressed)
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    if _, err = io.Copy(output, tarStream); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}
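// The gzip.NewReader error-fallback above is fragile: by the time NewReader
// fails, it has already consumed bytes from the stream, so copying the "raw"
// reader afterwards can silently drop the header bytes. A minimal sketch of a
// safer approach, assuming only the standard library: peek at the two gzip
// magic bytes through a bufio.Reader before deciding whether to decompress.
package main

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "fmt"
    "io"
)

// maybeDecompress returns a reader of the uncompressed payload whether or not
// the input is gzipped; nothing is consumed from the stream while sniffing.
func maybeDecompress(r io.Reader) (io.Reader, error) {
    br := bufio.NewReader(r)
    magic, err := br.Peek(2)
    if err != nil && err != io.EOF {
        return nil, err
    }
    if len(magic) == 2 && magic[0] == 0x1f && magic[1] == 0x8b {
        return gzip.NewReader(br)
    }
    return br, nil
}

func main() {
    var buf bytes.Buffer
    zw := gzip.NewWriter(&buf)
    zw.Write([]byte("layer.tar contents"))
    zw.Close()

    rdr, err := maybeDecompress(&buf)
    if err != nil {
        panic(err)
    }
    out, _ := io.ReadAll(rdr)
    fmt.Printf("%s\n", out)
}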
func (s *TagStore) pullRepository(r *registry.Session, out io.Writer, repoInfo *registry.RepositoryInfo, askedTag string, sf *utils.StreamFormatter, parallel bool) error {
    out.Write(sf.FormatStatus("", "Pulling repository %s", repoInfo.CanonicalName))

    repoData, err := r.GetRepositoryData(repoInfo.RemoteName)
    if err != nil {
        if strings.Contains(err.Error(), "HTTP code: 404") {
            return fmt.Errorf("Error: image %s:%s not found", repoInfo.RemoteName, askedTag)
        }
        // Unexpected HTTP error
        return err
    }

    log.Debugf("Retrieving the tag list")
    tagsList, err := r.GetRemoteTags(repoData.Endpoints, repoInfo.RemoteName, repoData.Tokens)
    if err != nil {
        log.Errorf("unable to get remote tags: %s", err)
        return err
    }

    for tag, id := range tagsList {
        repoData.ImgList[id] = &registry.ImgData{
            ID:       id,
            Tag:      tag,
            Checksum: "",
        }
    }

    log.Debugf("Registering tags")
    // If no tag has been specified, pull them all
    var imageID string
    if askedTag == "" {
        for tag, id := range tagsList {
            repoData.ImgList[id].Tag = tag
        }
    } else {
        // Otherwise, check that the tag exists and use only that one
        id, exists := tagsList[askedTag]
        if !exists {
            return fmt.Errorf("Tag %s not found in repository %s", askedTag, repoInfo.CanonicalName)
        }
        imageID = id
        repoData.ImgList[id].Tag = askedTag
    }

    errors := make(chan error)

    layersDownloaded := false
    for _, image := range repoData.ImgList {
        downloadImage := func(img *registry.ImgData) {
            if askedTag != "" && img.Tag != askedTag {
                if parallel {
                    errors <- nil
                }
                return
            }

            if img.Tag == "" {
                log.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID)
                if parallel {
                    errors <- nil
                }
                return
            }

            // ensure no two downloads of the same image happen at the same time
            if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
                } else {
                    log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
                if parallel {
                    errors <- nil
                }
                return
            }
            defer s.poolRemove("pull", "img:"+img.ID)

            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s", img.Tag, repoInfo.CanonicalName), nil))
            success := false
            var lastErr, err error
            var isDownloaded bool
            for _, ep := range repoInfo.Index.Mirrors {
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
                if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
                    // Don't report errors when pulling from mirrors.
                    log.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err)
                    continue
                }
                layersDownloaded = layersDownloaded || isDownloaded
                success = true
                break
            }
            if !success {
                for _, ep := range repoData.Endpoints {
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, endpoint: %s", img.Tag, repoInfo.CanonicalName, ep), nil))
                    if isDownloaded, err = s.pullImage(r, out, img.ID, ep, repoData.Tokens, sf); err != nil {
                        // It's not ideal that only the last error is returned, it would be better to concatenate the errors.
                        // As the error is also given to the output stream the user will see the error.
                        lastErr = err
                        out.Write(sf.FormatProgress(utils.TruncateID(img.ID), fmt.Sprintf("Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, repoInfo.CanonicalName, ep, err), nil))
                        continue
                    }
                    layersDownloaded = layersDownloaded || isDownloaded
                    success = true
                    break
                }
            }
            if !success {
                err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, repoInfo.CanonicalName, lastErr)
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), err.Error(), nil))
                if parallel {
                    errors <- err
                    return
                }
            }
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

            if parallel {
                errors <- nil
            }
        }

        if parallel {
            go downloadImage(image)
        } else {
            downloadImage(image)
        }
    }

    if parallel {
        var lastError error
        for i := 0; i < len(repoData.ImgList); i++ {
            if err := <-errors; err != nil {
                lastError = err
            }
        }
        if lastError != nil {
            return lastError
        }
    }

    for tag, id := range tagsList {
        if askedTag != "" && id != imageID {
            continue
        }
        if err := s.Set(repoInfo.LocalName, tag, id, true); err != nil {
            return err
        }
    }

    requestedTag := repoInfo.CanonicalName
    if len(askedTag) > 0 {
        requestedTag = repoInfo.CanonicalName + ":" + askedTag
    }
    WriteStatus(requestedTag, out, sf, layersDownloaded)
    return nil
}
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
    log.Debugf("Pulling tag from V2 registry: %q", tag)

    manifestBytes, manifestDigest, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
    if err != nil {
        return false, err
    }

    // loadManifest ensures that the manifest payload has the expected digest
    // if the tag is a digest reference.
    manifest, verified, err := s.loadManifest(eng, manifestBytes, manifestDigest, tag)
    if err != nil {
        return false, fmt.Errorf("error verifying manifest: %s", err)
    }

    if err := checkValidManifest(manifest); err != nil {
        return false, err
    }

    if verified {
        log.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
    }
    out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

    downloads := make([]downloadInfo, len(manifest.FSLayers))

    for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
        var (
            sumStr  = manifest.FSLayers[i].BlobSum
            imgJSON = []byte(manifest.History[i].V1Compatibility)
        )

        img, err := image.NewImgJSON(imgJSON)
        if err != nil {
            return false, fmt.Errorf("failed to parse json: %s", err)
        }
        downloads[i].img = img

        // Check if exists
        if s.graph.Exists(img.ID) {
            log.Debugf("Image already exists: %s", img.ID)
            continue
        }

        dgst, err := digest.ParseDigest(sumStr)
        if err != nil {
            return false, err
        }
        downloads[i].digest = dgst

        out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Pulling fs layer", nil))

        downloadFunc := func(di *downloadInfo) error {
            log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

            if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))
                } else {
                    log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
            } else {
                defer s.poolRemove("pull", "img:"+img.ID)
                tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
                if err != nil {
                    return err
                }

                r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest.Algorithm(), di.digest.Hex(), auth)
                if err != nil {
                    return err
                }
                defer r.Close()

                verifier, err := digest.NewDigestVerifier(di.digest)
                if err != nil {
                    return err
                }

                if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
                    In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
                    Out:       out,
                    Formatter: sf,
                    Size:      int(l),
                    NewLines:  false,
                    ID:        common.TruncateID(img.ID),
                    Action:    "Downloading",
                })); err != nil {
                    return fmt.Errorf("unable to copy v2 image blob data: %s", err)
                }

                out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Verifying Checksum", nil))

                if !verifier.Verified() {
                    log.Infof("Image verification failed: checksum mismatch for %q", di.digest.String())
                    verified = false
                }

                out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Download complete", nil))

                log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())

                di.tmpFile = tmpFile
                di.length = l
                di.downloaded = true
            }
            di.imgJSON = imgJSON

            return nil
        }

        if parallel {
            downloads[i].err = make(chan error)
            go func(di *downloadInfo) {
                di.err <- downloadFunc(di)
            }(&downloads[i])
        } else {
            if err := downloadFunc(&downloads[i]); err != nil {
                return false, err
            }
        }
    }

    var tagUpdated bool
    for i := len(downloads) - 1; i >= 0; i-- {
        d := &downloads[i]
        if d.err != nil {
            if err := <-d.err; err != nil {
                return false, err
            }
        }
        if d.downloaded {
            // if tmpFile is empty assume download and extracted elsewhere
            if d.tmpFile != nil {
                defer os.Remove(d.tmpFile.Name())
                defer d.tmpFile.Close()
                d.tmpFile.Seek(0, 0)
                err = s.graph.Register(d.img,
                    progressreader.New(progressreader.Config{
                        In:        d.tmpFile,
                        Out:       out,
                        Formatter: sf,
                        Size:      int(d.length),
                        ID:        common.TruncateID(d.img.ID),
                        Action:    "Extracting",
                    }))
                if err != nil {
                    return false, err
                }
                // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
            }
            out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Pull complete", nil))
            tagUpdated = true
        } else {
            out.Write(sf.FormatProgress(common.TruncateID(d.img.ID), "Already exists", nil))
        }
    }

    // Check for new tag if no layers downloaded
    if !tagUpdated {
        repo, err := s.Get(repoInfo.LocalName)
        if err != nil {
            return false, err
        }
        if repo != nil {
            if _, exists := repo[tag]; !exists {
                tagUpdated = true
            }
        }
    }

    if verified && tagUpdated {
        out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
    }

    if manifestDigest != "" {
        out.Write(sf.FormatStatus("", "Digest: %s", manifestDigest))
    }

    if utils.DigestReference(tag) {
        if err = s.SetDigest(repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
            return false, err
        }
    } else {
        // only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
        if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
            return false, err
        }
    }

    return tagUpdated, nil
}
func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
    if repoInfo.Official {
        j := eng.Job("trust_update_base")
        if err := j.Run(); err != nil {
            log.Errorf("error updating trust base graph: %s", err)
        }
    }

    endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
    if err != nil {
        if repoInfo.Index.Official {
            log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
            return ErrV2RegistryUnavailable
        }
        return fmt.Errorf("error getting registry endpoint: %s", err)
    }

    tags, err := s.getImageTags(repoInfo.LocalName, tag)
    if err != nil {
        return err
    }
    if len(tags) == 0 {
        return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
    }

    auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
    if err != nil {
        return fmt.Errorf("error getting authorization: %s", err)
    }

    for _, tag := range tags {
        log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
        mBytes, err := s.newManifest(repoInfo.LocalName, repoInfo.RemoteName, tag)
        if err != nil {
            return err
        }
        js, err := libtrust.NewJSONSignature(mBytes)
        if err != nil {
            return err
        }

        if err = js.Sign(s.trustKey); err != nil {
            return err
        }

        signedBody, err := js.PrettySignature("signatures")
        if err != nil {
            return err
        }
        log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

        manifestBytes := string(signedBody)

        manifest, verified, err := s.loadManifest(eng, signedBody)
        if err != nil {
            return fmt.Errorf("error verifying manifest: %s", err)
        }

        if err := checkValidManifest(manifest); err != nil {
            return fmt.Errorf("invalid manifest: %s", err)
        }

        if verified {
            log.Infof("Pushing verified image, key %s is registered for %q", s.trustKey.KeyID(), repoInfo.RemoteName)
        }

        for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
            var (
                sumStr  = manifest.FSLayers[i].BlobSum
                imgJSON = []byte(manifest.History[i].V1Compatibility)
            )

            sumParts := strings.SplitN(sumStr, ":", 2)
            if len(sumParts) < 2 {
                return fmt.Errorf("Invalid checksum: %s", sumStr)
            }
            manifestSum := sumParts[1]

            img, err := image.NewImgJSON(imgJSON)
            if err != nil {
                return fmt.Errorf("Failed to parse json: %s", err)
            }

            // Call mount blob
            exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
            if err != nil {
                out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image push failed", nil))
                return err
            }
            if !exists {
                if err := s.pushV2Image(r, img, endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, sf, out, auth); err != nil {
                    return err
                }
            } else {
                out.Write(sf.FormatProgress(common.TruncateID(img.ID), "Image already exists", nil))
            }
        }

        // push the manifest
        if err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth); err != nil {
            return err
        }
    }
    return nil
}
func (s *TagStore) pullV2Tag(r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *streamformatter.StreamFormatter, auth *registry.RequestAuthorization) (bool, error) {
    logrus.Debugf("Pulling tag from V2 registry: %q", tag)

    remoteDigest, manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
    if err != nil {
        return false, err
    }

    // loadManifest ensures that the manifest payload has the expected digest
    // if the tag is a digest reference.
    localDigest, manifest, verified, err := s.loadManifest(manifestBytes, tag, remoteDigest)
    if err != nil {
        return false, fmt.Errorf("error verifying manifest: %s", err)
    }

    if verified {
        logrus.Printf("Image manifest for %s has been verified", utils.ImageReference(repoInfo.CanonicalName, tag))
    }
    out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))

    // downloadInfo is used to pass information from download to extractor
    type downloadInfo struct {
        imgJSON    []byte
        img        *image.Image
        digest     digest.Digest
        tmpFile    *os.File
        length     int64
        downloaded bool
        err        chan error
    }

    downloads := make([]downloadInfo, len(manifest.FSLayers))

    for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
        var (
            sumStr  = manifest.FSLayers[i].BlobSum
            imgJSON = []byte(manifest.History[i].V1Compatibility)
        )

        img, err := image.NewImgJSON(imgJSON)
        if err != nil {
            return false, fmt.Errorf("failed to parse json: %s", err)
        }
        downloads[i].img = img

        // Check if exists
        if s.graph.Exists(img.ID) {
            logrus.Debugf("Image already exists: %s", img.ID)
            continue
        }

        dgst, err := digest.ParseDigest(sumStr)
        if err != nil {
            return false, err
        }
        downloads[i].digest = dgst

        out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))

        downloadFunc := func(di *downloadInfo) error {
            logrus.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

            if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))
                } else {
                    logrus.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
            } else {
                defer s.poolRemove("pull", "img:"+img.ID)
                tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
                if err != nil {
                    return err
                }

                r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, di.digest, auth)
                if err != nil {
                    return err
                }
                defer r.Close()

                verifier, err := digest.NewDigestVerifier(di.digest)
                if err != nil {
                    return err
                }

                if _, err := io.Copy(tmpFile, progressreader.New(progressreader.Config{
                    In:        ioutil.NopCloser(io.TeeReader(r, verifier)),
                    Out:       out,
                    Formatter: sf,
                    Size:      int(l),
                    NewLines:  false,
                    ID:        stringid.TruncateID(img.ID),
                    Action:    "Downloading",
                })); err != nil {
                    return fmt.Errorf("unable to copy v2 image blob data: %s", err)
                }

                out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Verifying Checksum", nil))

                if !verifier.Verified() {
                    return fmt.Errorf("image layer digest verification failed for %q", di.digest)
                }

                out.Write(sf.FormatProgress(stringid.TruncateID(img.ID), "Download complete", nil))

                logrus.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())

                di.tmpFile = tmpFile
                di.length = l
                di.downloaded = true
            }
            di.imgJSON = imgJSON

            return nil
        }

        downloads[i].err = make(chan error)
        go func(di *downloadInfo) {
            di.err <- downloadFunc(di)
        }(&downloads[i])
    }

    var tagUpdated bool
    for i := len(downloads) - 1; i >= 0; i-- {
        d := &downloads[i]
        if d.err != nil {
            if err := <-d.err; err != nil {
                return false, err
            }
        }
        if d.downloaded {
            // if tmpFile is empty assume download and extracted elsewhere
            if d.tmpFile != nil {
                defer os.Remove(d.tmpFile.Name())
                defer d.tmpFile.Close()
                d.tmpFile.Seek(0, 0)
                err = s.graph.Register(d.img,
                    progressreader.New(progressreader.Config{
                        In:        d.tmpFile,
                        Out:       out,
                        Formatter: sf,
                        Size:      int(d.length),
                        ID:        stringid.TruncateID(d.img.ID),
                        Action:    "Extracting",
                    }))
                if err != nil {
                    return false, err
                }

                if err := s.graph.SetDigest(d.img.ID, d.digest); err != nil {
                    return false, err
                }

                // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
            }
            out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
            tagUpdated = true
        } else {
            out.Write(sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
        }
    }

    // Check for new tag if no layers downloaded
    if !tagUpdated {
        repo, err := s.Get(repoInfo.LocalName)
        if err != nil {
            return false, err
        }
        if repo != nil {
            if _, exists := repo[tag]; !exists {
                tagUpdated = true
            }
        } else {
            tagUpdated = true
        }
    }

    if verified && tagUpdated {
        out.Write(sf.FormatStatus(utils.ImageReference(repoInfo.CanonicalName, tag), "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
    }

    if localDigest != remoteDigest {
        // this is not a verification check.
        // NOTE(stevvooe): This is a very defensive branch and should never
        // happen, since all manifest digest implementations use the same
        // algorithm.
        logrus.WithFields(logrus.Fields{
            "local":  localDigest,
            "remote": remoteDigest,
        }).Debugf("local digest does not match remote")

        out.Write(sf.FormatStatus("", "Remote Digest: %s", remoteDigest))
    }

    out.Write(sf.FormatStatus("", "Digest: %s", localDigest))

    if tag == localDigest.String() {
        // TODO(stevvooe): Ideally, we should always set the digest so we can
        // use the digest whether we pull by it or not. Unfortunately, the tag
        // store treats the digest as a separate tag, meaning there may be an
        // untagged digest image that would seem to be dangling by a user.
        if err = s.SetDigest(repoInfo.LocalName, localDigest.String(), downloads[0].img.ID); err != nil {
            return false, err
        }
    }

    if !utils.DigestReference(tag) {
        // only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
        if err = s.Tag(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
            return false, err
        }
    }

    return tagUpdated, nil
}
func (s *TagStore) pullImage(r *registry.Session, out io.Writer, imgID, endpoint string, token []string, sf *utils.StreamFormatter) (bool, error) {
    history, err := r.GetRemoteHistory(imgID, endpoint, token)
    if err != nil {
        return false, err
    }
    out.Write(sf.FormatProgress(utils.TruncateID(imgID), "Pulling dependent layers", nil))
    // FIXME: Try to stream the images?
    // FIXME: Launch the getRemoteImage() in goroutines

    layersDownloaded := false
    for i := len(history) - 1; i >= 0; i-- {
        id := history[i]

        // ensure no two downloads of the same layer happen at the same time
        if c, err := s.poolAdd("pull", "layer:"+id); err != nil {
            log.Debugf("Image (id: %s) pull is already running, skipping: %v", id, err)
            if c != nil {
                <-c
            }
        }
        defer s.poolRemove("pull", "layer:"+id)

        if !s.graph.Exists(id) {
            out.Write(sf.FormatProgress(utils.TruncateID(id), "Pulling metadata", nil))
            var (
                imgJSON []byte
                imgSize int
                err     error
                img     *image.Image
            )
            retries := 5
            for j := 1; j <= retries; j++ {
                imgJSON, imgSize, err = r.GetRemoteImageJSON(id, endpoint, token)
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layersDownloaded, err
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                }
                img, err = image.NewImgJSON(imgJSON)
                layersDownloaded = true
                if err != nil && j == retries {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layersDownloaded, fmt.Errorf("Failed to parse json: %s", err)
                } else if err != nil {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else {
                    break
                }
            }

            for j := 1; j <= retries; j++ {
                // Get the layer
                status := "Pulling fs layer"
                if j > 1 {
                    status = fmt.Sprintf("Pulling fs layer [retries: %d]", j)
                }
                out.Write(sf.FormatProgress(utils.TruncateID(id), status, nil))
                layer, err := r.GetRemoteImageLayer(img.ID, endpoint, token, int64(imgSize))
                if uerr, ok := err.(*url.Error); ok {
                    err = uerr.Err
                }
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error pulling dependent layers", nil))
                    return layersDownloaded, err
                }
                layersDownloaded = true
                defer layer.Close()

                err = s.graph.Register(img,
                    utils.ProgressReader(layer, imgSize, out, sf, false, utils.TruncateID(id), "Downloading"))
                if terr, ok := err.(net.Error); ok && terr.Timeout() && j < retries {
                    time.Sleep(time.Duration(j) * 500 * time.Millisecond)
                    continue
                } else if err != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(id), "Error downloading dependent layers", nil))
                    return layersDownloaded, err
                } else {
                    break
                }
            }
        }
        out.Write(sf.FormatProgress(utils.TruncateID(id), "Download complete", nil))
    }
    return layersDownloaded, nil
}
func (s *TagStore) pushV2Repository(r *registry.Session, localRepo Repository, out io.Writer, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter) error {
    endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
    if err != nil {
        if repoInfo.Index.Official {
            log.Debugf("Unable to push to V2 registry, falling back to v1: %s", err)
            return ErrV2RegistryUnavailable
        }
        return fmt.Errorf("error getting registry endpoint: %s", err)
    }

    tags, err := s.getImageTags(localRepo, tag)
    if err != nil {
        return err
    }
    if len(tags) == 0 {
        return fmt.Errorf("No tags to push for %s", repoInfo.LocalName)
    }

    auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
    if err != nil {
        return fmt.Errorf("error getting authorization: %s", err)
    }

    for _, tag := range tags {
        log.Debugf("Pushing repository: %s:%s", repoInfo.CanonicalName, tag)

        layerID, exists := localRepo[tag]
        if !exists {
            return fmt.Errorf("tag does not exist: %s", tag)
        }

        layer, err := s.graph.Get(layerID)
        if err != nil {
            return err
        }

        m := &registry.ManifestData{
            SchemaVersion: 1,
            Name:          repoInfo.RemoteName,
            Tag:           tag,
            Architecture:  layer.Architecture,
        }
        var metadata runconfig.Config
        if layer.Config != nil {
            metadata = *layer.Config
        }

        layersSeen := make(map[string]bool)
        layers := []*image.Image{layer}
        for ; layer != nil; layer, err = layer.GetParent() {
            if err != nil {
                return err
            }
            if layersSeen[layer.ID] {
                break
            }
            layers = append(layers, layer)
            layersSeen[layer.ID] = true
        }
        m.FSLayers = make([]*registry.FSLayer, len(layers))
        m.History = make([]*registry.ManifestHistory, len(layers))

        // Schema version 1 requires layer ordering from top to root
        for i, layer := range layers {
            log.Debugf("Pushing layer: %s", layer.ID)

            if layer.Config != nil && metadata.Image != layer.ID {
                if err := runconfig.Merge(&metadata, layer.Config); err != nil {
                    return err
                }
            }
            jsonData, err := layer.RawJson()
            if err != nil {
                return fmt.Errorf("cannot retrieve the path for %s: %s", layer.ID, err)
            }

            checksum, err := layer.GetCheckSum(s.graph.ImageRoot(layer.ID))
            if err != nil {
                return fmt.Errorf("error getting image checksum: %s", err)
            }

            var exists bool
            if len(checksum) > 0 {
                sumParts := strings.SplitN(checksum, ":", 2)
                if len(sumParts) < 2 {
                    return fmt.Errorf("Invalid checksum: %s", checksum)
                }

                // Call mount blob
                exists, err = r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], sumParts[1], auth)
                if err != nil {
                    out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image push failed", nil))
                    return err
                }
            }
            if !exists {
                if cs, err := s.pushV2Image(r, layer, endpoint, repoInfo.RemoteName, sf, out, auth); err != nil {
                    return err
                } else if cs != checksum {
                    // Cache new checksum
                    if err := layer.SaveCheckSum(s.graph.ImageRoot(layer.ID), cs); err != nil {
                        return err
                    }
                    checksum = cs
                }
            } else {
                out.Write(sf.FormatProgress(common.TruncateID(layer.ID), "Image already exists", nil))
            }

            m.FSLayers[i] = &registry.FSLayer{BlobSum: checksum}
            m.History[i] = &registry.ManifestHistory{V1Compatibility: string(jsonData)}
        }

        if err := checkValidManifest(m); err != nil {
            return fmt.Errorf("invalid manifest: %s", err)
        }

        log.Debugf("Pushing %s:%s to v2 repository", repoInfo.LocalName, tag)
        mBytes, err := json.MarshalIndent(m, "", " ")
        if err != nil {
            return err
        }
        js, err := libtrust.NewJSONSignature(mBytes)
        if err != nil {
            return err
        }

        if err = js.Sign(s.trustKey); err != nil {
            return err
        }

        signedBody, err := js.PrettySignature("signatures")
        if err != nil {
            return err
        }
        log.Infof("Signed manifest for %s:%s using daemon's key: %s", repoInfo.LocalName, tag, s.trustKey.KeyID())

        // push the manifest
        digest, err := r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, signedBody, mBytes, auth)
        if err != nil {
            return err
        }

        out.Write(sf.FormatStatus("", "Digest: %s", digest))
    }
    return nil
}
func (s *TagStore) pullV2Tag(eng *engine.Engine, r *registry.Session, out io.Writer, endpoint *registry.Endpoint, repoInfo *registry.RepositoryInfo, tag string, sf *utils.StreamFormatter, parallel bool, auth *registry.RequestAuthorization) (bool, error) {
    log.Debugf("Pulling tag from V2 registry: %q", tag)

    manifestBytes, err := r.GetV2ImageManifest(endpoint, repoInfo.RemoteName, tag, auth)
    if err != nil {
        return false, err
    }

    manifest, verified, err := s.loadManifest(eng, manifestBytes)
    if err != nil {
        return false, fmt.Errorf("error verifying manifest: %s", err)
    }

    if err := checkValidManifest(manifest); err != nil {
        return false, err
    }

    if verified {
        log.Printf("Image manifest for %s:%s has been verified", repoInfo.CanonicalName, tag)
    } else {
        out.Write(sf.FormatStatus(tag, "Pulling from %s", repoInfo.CanonicalName))
    }

    downloads := make([]downloadInfo, len(manifest.FSLayers))

    for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
        var (
            sumStr  = manifest.FSLayers[i].BlobSum
            imgJSON = []byte(manifest.History[i].V1Compatibility)
        )

        img, err := image.NewImgJSON(imgJSON)
        if err != nil {
            return false, fmt.Errorf("failed to parse json: %s", err)
        }
        downloads[i].img = img

        // Check if exists
        if s.graph.Exists(img.ID) {
            log.Debugf("Image already exists: %s", img.ID)
            continue
        }

        chunks := strings.SplitN(sumStr, ":", 2)
        if len(chunks) < 2 {
            return false, fmt.Errorf("expected 2 parts in the sumStr, got %#v", chunks)
        }
        sumType, checksum := chunks[0], chunks[1]
        out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Pulling fs layer", nil))

        downloadFunc := func(di *downloadInfo) error {
            log.Debugf("pulling blob %q to V1 img %s", sumStr, img.ID)

            if c, err := s.poolAdd("pull", "img:"+img.ID); err != nil {
                if c != nil {
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Layer already being pulled by another client. Waiting.", nil))
                    <-c
                    out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))
                } else {
                    log.Debugf("Image (id: %s) pull is already running, skipping: %v", img.ID, err)
                }
            } else {
                defer s.poolRemove("pull", "img:"+img.ID)
                tmpFile, err := ioutil.TempFile("", "GetV2ImageBlob")
                if err != nil {
                    return err
                }

                r, l, err := r.GetV2ImageBlobReader(endpoint, repoInfo.RemoteName, sumType, checksum, auth)
                if err != nil {
                    return err
                }
                defer r.Close()

                // Wrap the reader with the appropriate TarSum reader.
                tarSumReader, err := tarsum.NewTarSumForLabel(r, true, sumType)
                if err != nil {
                    return fmt.Errorf("unable to wrap image blob reader with TarSum: %s", err)
                }
                if _, err := io.Copy(tmpFile, utils.ProgressReader(ioutil.NopCloser(tarSumReader), int(l), out, sf, false, utils.TruncateID(img.ID), "Downloading")); err != nil {
                    return err
                }

                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Verifying Checksum", nil))

                if finalChecksum := tarSumReader.Sum(nil); !strings.EqualFold(finalChecksum, sumStr) {
                    return fmt.Errorf("image verification failed: checksum mismatch - expected %q but got %q", sumStr, finalChecksum)
                }

                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Download complete", nil))

                log.Debugf("Downloaded %s to tempfile %s", img.ID, tmpFile.Name())

                di.tmpFile = tmpFile
                di.length = l
                di.downloaded = true
            }
            di.imgJSON = imgJSON

            return nil
        }

        if parallel {
            downloads[i].err = make(chan error)
            go func(di *downloadInfo) {
                di.err <- downloadFunc(di)
            }(&downloads[i])
        } else {
            if err := downloadFunc(&downloads[i]); err != nil {
                return false, err
            }
        }
    }

    var layersDownloaded bool
    for i := len(downloads) - 1; i >= 0; i-- {
        d := &downloads[i]
        if d.err != nil {
            if err := <-d.err; err != nil {
                return false, err
            }
        }
        if d.downloaded {
            // if tmpFile is empty assume download and extracted elsewhere
            if d.tmpFile != nil {
                defer os.Remove(d.tmpFile.Name())
                defer d.tmpFile.Close()
                d.tmpFile.Seek(0, 0)
                err = s.graph.Register(d.img,
                    utils.ProgressReader(d.tmpFile, int(d.length), out, sf, false, utils.TruncateID(d.img.ID), "Extracting"))
                if err != nil {
                    return false, err
                }
                // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
            }
            out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Pull complete", nil))
            layersDownloaded = true
        } else {
            out.Write(sf.FormatProgress(utils.TruncateID(d.img.ID), "Already exists", nil))
        }
    }

    if verified {
        out.Write(sf.FormatStatus(repoInfo.CanonicalName+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
    }

    if err = s.Set(repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
        return false, err
    }

    return layersDownloaded, nil
}
func (s *TagStore) pushV2Repository(r *registry.Session, eng *engine.Engine, out io.Writer, repoInfo *registry.RepositoryInfo, manifestBytes, tag string, sf *utils.StreamFormatter) error {
    if repoInfo.Official {
        j := eng.Job("trust_update_base")
        if err := j.Run(); err != nil {
            log.Errorf("error updating trust base graph: %s", err)
        }
    }

    endpoint, err := r.V2RegistryEndpoint(repoInfo.Index)
    if err != nil {
        return fmt.Errorf("error getting registry endpoint: %s", err)
    }
    auth, err := r.GetV2Authorization(endpoint, repoInfo.RemoteName, false)
    if err != nil {
        return fmt.Errorf("error getting authorization: %s", err)
    }

    // if no manifest is given, generate and sign with the key associated with the local tag store
    if len(manifestBytes) == 0 {
        mBytes, err := s.newManifest(repoInfo.LocalName, repoInfo.RemoteName, tag)
        if err != nil {
            return err
        }
        js, err := libtrust.NewJSONSignature(mBytes)
        if err != nil {
            return err
        }

        if err = js.Sign(s.trustKey); err != nil {
            return err
        }

        signedBody, err := js.PrettySignature("signatures")
        if err != nil {
            return err
        }
        log.Infof("Signed manifest using daemon's key: %s", s.trustKey.KeyID())

        manifestBytes = string(signedBody)
    }

    manifest, verified, err := s.verifyManifest(eng, []byte(manifestBytes))
    if err != nil {
        return fmt.Errorf("error verifying manifest: %s", err)
    }

    if err := checkValidManifest(manifest); err != nil {
        return fmt.Errorf("invalid manifest: %s", err)
    }

    if !verified {
        log.Debugf("Pushing unverified image")
    }

    for i := len(manifest.FSLayers) - 1; i >= 0; i-- {
        var (
            sumStr  = manifest.FSLayers[i].BlobSum
            imgJSON = []byte(manifest.History[i].V1Compatibility)
        )

        sumParts := strings.SplitN(sumStr, ":", 2)
        if len(sumParts) < 2 {
            return fmt.Errorf("Invalid checksum: %s", sumStr)
        }
        manifestSum := sumParts[1]

        img, err := image.NewImgJSON(imgJSON)
        if err != nil {
            return fmt.Errorf("Failed to parse json: %s", err)
        }

        img, err = s.graph.Get(img.ID)
        if err != nil {
            return err
        }

        arch, err := img.TarLayer()
        if err != nil {
            return fmt.Errorf("Could not get tar layer: %s", err)
        }

        // Call mount blob
        exists, err := r.HeadV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum, auth)
        if err != nil {
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
            return err
        }
        if !exists {
            err = r.PutV2ImageBlob(endpoint, repoInfo.RemoteName, sumParts[0], manifestSum,
                utils.ProgressReader(arch, int(img.Size), out, sf, false, utils.TruncateID(img.ID), "Pushing"), auth)
            if err != nil {
                out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image push failed", nil))
                return err
            }
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image successfully pushed", nil))
        } else {
            out.Write(sf.FormatProgress(utils.TruncateID(img.ID), "Image already exists", nil))
        }
    }

    // push the manifest
    return r.PutV2ImageManifest(endpoint, repoInfo.RemoteName, tag, bytes.NewReader([]byte(manifestBytes)), auth)
}