func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) {
	blobs := make([]schema1.FSLayer, blobCount)
	history := make([]schema1.History, blobCount)

	for i := 0; i < blobCount; i++ {
		dgst, blob := newRandomBlob((i % 5) * 16)

		blobs[i] = schema1.FSLayer{BlobSum: dgst}
		history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)}
	}

	m := schema1.Manifest{
		Name:         name.String(),
		Tag:          tag,
		Architecture: "x86",
		FSLayers:     blobs,
		History:      history,
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		panic(err)
	}

	return sm, digest.FromBytes(sm.Canonical), sm.Canonical
}
// Delete deletes a reference from the store. It returns true if a deletion
// happened, or false otherwise.
func (store *store) Delete(ref reference.Named) (bool, error) {
	ref = defaultTagIfNameOnly(ref)

	store.mu.Lock()
	defer store.mu.Unlock()

	repoName := ref.Name()
	repository, exists := store.Repositories[repoName]
	if !exists {
		return false, ErrDoesNotExist
	}

	refStr := ref.String()
	if id, exists := repository[refStr]; exists {
		delete(repository, refStr)
		if len(repository) == 0 {
			delete(store.Repositories, repoName)
		}
		if store.referencesByIDCache[id] != nil {
			delete(store.referencesByIDCache[id], refStr)
			if len(store.referencesByIDCache[id]) == 0 {
				delete(store.referencesByIDCache, id)
			}
		}
		return true, store.save()
	}

	return false, ErrDoesNotExist
}
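// Usage sketch (an illustration, not part of the store implementation):
// Delete normalizes a bare repository name via defaultTagIfNameOnly, so
// deleting "busybox" removes the "busybox:latest" entry if it exists.
// reference.ParseNamed is assumed to be available from the vendored
// reference package used throughout this code.
func untagExample(s *store, name string) (bool, error) {
	ref, err := reference.ParseNamed(name) // e.g. "busybox" or "busybox:latest"
	if err != nil {
		return false, err
	}
	return s.Delete(ref)
}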
func (t *mockTagAdder) Add(ref reference.Named, id image.ID, force bool) error {
	if t.refs == nil {
		t.refs = make(map[string]string)
	}
	t.refs[ref.String()] = id.String()
	return nil
}
// TagImage creates the tag specified by newTag, pointing to the image named
// imageName.
func (daemon *Daemon) TagImage(newTag reference.Named, imageName string) error {
	imageID, err := daemon.GetImageID(imageName)
	if err != nil {
		return err
	}
	newTag = registry.NormalizeLocalReference(newTag)
	if err := daemon.tagStore.AddTag(newTag, imageID, true); err != nil {
		return err
	}
	daemon.EventsService.Log("tag", newTag.String(), "")
	return nil
}
// TagImage creates the tag specified by newTag, pointing to the image named
// imageName. If force is true, an existing tag with the same name may be
// overwritten.
func (daemon *Daemon) TagImage(newTag reference.Named, imageName string, force bool) error {
	if _, isDigested := newTag.(reference.Digested); isDigested {
		return errors.New("refusing to create a tag with a digest reference")
	}
	if newTag.Name() == string(digest.Canonical) {
		return errors.New("refusing to create an ambiguous tag using digest algorithm as name")
	}

	newTag = registry.NormalizeLocalReference(newTag)

	imageID, err := daemon.GetImageID(imageName)
	if err != nil {
		return err
	}
	daemon.EventsService.Log("tag", newTag.String(), "")
	return daemon.tagStore.Add(newTag, imageID, force)
}
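// Hypothetical caller sketch (not part of the daemon): parse a user-supplied
// reference string and tag an existing image with it, overwriting any tag of
// the same name. reference.ParseNamed is assumed to be available; TagImage
// itself rejects digest references and ambiguous names as shown above.
func tagByNameExample(daemon *Daemon, imageName, newRef string) error {
	ref, err := reference.ParseNamed(newRef)
	if err != nil {
		return err
	}
	return daemon.TagImage(ref, imageName, true)
}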
// Get retrieves an item from the store by reference.
func (store *store) Get(ref reference.Named) (image.ID, error) {
	ref = defaultTagIfNameOnly(ref)

	store.mu.RLock()
	defer store.mu.RUnlock()

	repository, exists := store.Repositories[ref.Name()]
	if !exists || repository == nil {
		return "", ErrDoesNotExist
	}

	id, exists := repository[ref.String()]
	if !exists {
		return "", ErrDoesNotExist
	}

	return id, nil
}
func (store *store) addReference(ref reference.Named, id image.ID, force bool) error {
	if ref.Name() == string(digest.Canonical) {
		return errors.New("refusing to create an ambiguous tag using digest algorithm as name")
	}

	store.mu.Lock()
	defer store.mu.Unlock()

	repository, exists := store.Repositories[ref.Name()]
	if !exists || repository == nil {
		repository = make(map[string]image.ID)
		store.Repositories[ref.Name()] = repository
	}

	refStr := ref.String()
	oldID, exists := repository[refStr]

	if exists {
		// force only works for tags
		if digested, isDigest := ref.(reference.Digested); isDigest {
			return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String())
		}

		if !force {
			return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String())
		}

		if store.referencesByIDCache[oldID] != nil {
			delete(store.referencesByIDCache[oldID], refStr)
			if len(store.referencesByIDCache[oldID]) == 0 {
				delete(store.referencesByIDCache, oldID)
			}
		}
	}

	repository[refStr] = id
	if store.referencesByIDCache[id] == nil {
		store.referencesByIDCache[id] = make(map[string]reference.Named)
	}
	store.referencesByIDCache[id][refStr] = ref

	return store.save()
}
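// Hypothetical reverse-lookup sketch: the referencesByIDCache maintained by
// addReference and Delete above exists so that "which references point at
// this image ID?" is a single map read. This helper is an assumption for
// illustration and is not part of the code shown in this section.
func (store *store) referencesForExample(id image.ID) []reference.Named {
	store.mu.RLock()
	defer store.mu.RUnlock()

	var refs []reference.Named
	for _, ref := range store.referencesByIDCache[id] {
		refs = append(refs, ref)
	}
	return refs
}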
// Pull initiates a pull operation. ref names the repository to pull and may
// optionally carry a tag or digest to select a specific image.
func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error {
	// Resolve the Repository name from fqn to RepositoryInfo
	repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref)
	if err != nil {
		return err
	}

	// makes sure name is not empty or `scratch`
	if err := validateRepoName(repoInfo.LocalName.Name()); err != nil {
		return err
	}

	endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.CanonicalName)
	if err != nil {
		return err
	}

	localName := registry.NormalizeLocalReference(ref)

	var (
		// use a slice to append the error strings and return a joined string to caller
		errors []string
		// discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport.
		// By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in errors.
		// As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of
		// any subsequent ErrNoSupport errors in errors.
		// It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be
		// returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant
		// error comes from the v2 endpoints, not the v1 ones.
		discardNoSupportErrors bool
	)
	for _, endpoint := range endpoints {
		logrus.Debugf("Trying to pull %s from %s %s", repoInfo.LocalName, endpoint.URL, endpoint.Version)

		puller, err := newPuller(endpoint, repoInfo, imagePullConfig)
		if err != nil {
			errors = append(errors, err.Error())
			continue
		}
		if fallback, err := puller.Pull(ctx, ref); err != nil {
			// Was this pull cancelled? If so, don't try to fall
			// back.
			select {
			case <-ctx.Done():
				fallback = false
			default:
			}
			if fallback {
				if _, ok := err.(registry.ErrNoSupport); !ok {
					// Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors.
					discardNoSupportErrors = true
					// append subsequent errors
					errors = append(errors, err.Error())
				} else if !discardNoSupportErrors {
					// Save the ErrNoSupport error, because it's either the first error or all encountered errors
					// were also ErrNoSupport errors.
					// append subsequent errors
					errors = append(errors, err.Error())
				}
				continue
			}
			errors = append(errors, err.Error())
			logrus.Debugf("Not continuing with error: %v", fmt.Errorf(strings.Join(errors, "\n")))
			if len(errors) > 0 {
				return fmt.Errorf(strings.Join(errors, "\n"))
			}
		}

		imagePullConfig.EventsService.Log("pull", localName.String(), "")
		return nil
	}

	if len(errors) == 0 {
		return fmt.Errorf("no endpoints found for %s", ref.String())
	}

	if len(errors) > 0 {
		return fmt.Errorf(strings.Join(errors, "\n"))
	}

	return nil
}
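// A minimal, self-contained sketch of the error-aggregation rule used by
// Pull above: registry.ErrNoSupport errors are kept only while no other
// kind of error has been seen. The isNoSupport predicate stands in for the
// `err.(registry.ErrNoSupport)` type assertion and is an assumption of this
// sketch, not part of the original code.
func collectPullErrors(attempts []error, isNoSupport func(error) bool) []string {
	var (
		msgs                   []string
		discardNoSupportErrors bool
	)
	for _, err := range attempts {
		if err == nil {
			continue
		}
		if !isNoSupport(err) {
			// A non-ErrNoSupport error: keep it and discard any later ErrNoSupport errors.
			discardNoSupportErrors = true
			msgs = append(msgs, err.Error())
		} else if !discardNoSupportErrors {
			// Only ErrNoSupport errors seen so far, so this one is still relevant.
			msgs = append(msgs, err.Error())
		}
	}
	return msgs
}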
func (p *v2Puller) pullV2Tag(out io.Writer, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Digested); isDigested {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(context.Background())
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	out.Write(p.sf.FormatStatus(tagOrDigest, "Pulling from %s", p.repo.Name()))

	var downloads []*downloadInfo

	defer func() {
		for _, d := range downloads {
			p.config.Pool.removeWithError(d.poolKey, err)
			if d.tmpFile != nil {
				d.tmpFile.Close()
				if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
					logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
				}
			}
		}
	}()

	// Image history converted to the new format
	var history []image.History

	poolKey := "v2layer:"
	notFoundLocally := false

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum
		poolKey += blobSum.String()

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		// Do we have a layer on disk corresponding to the set of
		// blobsums up to this point?
		if !notFoundLocally {
			notFoundLocally = true
			diffID, err := p.blobSumService.GetDiffID(blobSum)
			if err == nil {
				rootFS.Append(diffID)
				if l, err := p.config.LayerStore.Get(rootFS.ChainID()); err == nil {
					notFoundLocally = false
					logrus.Debugf("Layer already exists: %s", blobSum.String())
					out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Already exists", nil))
					defer layer.ReleaseAndLog(p.config.LayerStore, l)
					continue
				} else {
					rootFS.DiffIDs = rootFS.DiffIDs[:len(rootFS.DiffIDs)-1]
				}
			}
		}

		out.Write(p.sf.FormatProgress(stringid.TruncateID(blobSum.String()), "Pulling fs layer", nil))

		tmpFile, err := ioutil.TempFile("", "GetImageBlob")
		if err != nil {
			return false, err
		}

		d := &downloadInfo{
			poolKey: poolKey,
			digest:  blobSum,
			tmpFile: tmpFile,
			// TODO: seems like this chan buffer solved hanging problem in go1.5,
			// this can indicate some deeper problem that somehow we never take
			// error from channel in loop below
			err: make(chan error, 1),
		}

		downloads = append(downloads, d)

		broadcaster, found := p.config.Pool.add(d.poolKey)
		broadcaster.Add(out)
		d.broadcaster = broadcaster
		if found {
			d.err <- nil
		} else {
			go p.download(d)
		}
	}

	for _, d := range downloads {
		if err := <-d.err; err != nil {
			return false, err
		}

		if d.layer == nil {
			// Wait for a different pull to download and extract
			// this layer.
			err = d.broadcaster.Wait()
			if err != nil {
				return false, err
			}

			diffID, err := p.blobSumService.GetDiffID(d.digest)
			if err != nil {
				return false, err
			}
			rootFS.Append(diffID)

			l, err := p.config.LayerStore.Get(rootFS.ChainID())
			if err != nil {
				return false, err
			}

			defer layer.ReleaseAndLog(p.config.LayerStore, l)

			continue
		}

		d.tmpFile.Seek(0, 0)
		reader := progressreader.New(progressreader.Config{
			In:        d.tmpFile,
			Out:       d.broadcaster,
			Formatter: p.sf,
			Size:      d.size,
			NewLines:  false,
			ID:        stringid.TruncateID(d.digest.String()),
			Action:    "Extracting",
		})

		inflatedLayerData, err := archive.DecompressStream(reader)
		if err != nil {
			return false, fmt.Errorf("could not get decompression stream: %v", err)
		}

		l, err := p.config.LayerStore.Register(inflatedLayerData, rootFS.ChainID())
		if err != nil {
			return false, fmt.Errorf("failed to register layer: %v", err)
		}
		logrus.Debugf("layer %s registered successfully", l.DiffID())
		rootFS.Append(l.DiffID())

		// Cache mapping from this layer's DiffID to the blobsum
		if err := p.blobSumService.Add(l.DiffID(), d.digest); err != nil {
			return false, err
		}

		defer layer.ReleaseAndLog(p.config.LayerStore, l)

		d.broadcaster.Write(p.sf.FormatProgress(stringid.TruncateID(d.digest.String()), "Pull complete", nil))
		d.broadcaster.Close()
		tagUpdated = true
	}

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), rootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
	if err != nil {
		return false, err
	}

	// Check for new tag if no layers downloaded
	var oldTagImageID image.ID
	if !tagUpdated {
		oldTagImageID, err = p.config.TagStore.Get(ref)
		if err != nil || oldTagImageID != imageID {
			tagUpdated = true
		}
	}

	if tagUpdated {
		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.TagStore.AddDigest(canonical, imageID, true); err != nil {
				return false, err
			}
		} else if err = p.config.TagStore.AddTag(ref, imageID, true); err != nil {
			return false, err
		}
	}

	if manifestDigest != "" {
		out.Write(p.sf.FormatStatus("", "Digest: %s", manifestDigest))
	}

	return tagUpdated, nil
}
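// downloadInfo and its broadcaster are not declared in this section; their
// shape, inferred purely from the fields and methods pullV2Tag uses above,
// is sketched here. Everything below is an illustration of that usage and
// an assumption, not the real declarations.
type downloadInfoSketch struct {
	poolKey     string           // de-duplication key registered with p.config.Pool
	digest      digest.Digest    // blobSum of the layer being fetched
	tmpFile     *os.File         // temporary file the blob is downloaded into
	size        int64            // blob size, used for progress reporting
	err         chan error       // buffered with capacity 1; received from in the second loop
	layer       layer.Layer      // set by p.download on success; nil means another pull owns the layer
	broadcaster layerBroadcaster // progress fan-out returned by p.config.Pool.add
}

// layerBroadcaster captures only the methods called on d.broadcaster above;
// the return type of Close is an assumption.
type layerBroadcaster interface {
	io.Writer
	Add(w io.Writer)
	Wait() error
	Close() error
}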
// metadataForImage makes a query to a docker registry and returns a map of the labels on that image.
// Currently supports the v2.0 registry Schema v1 (not to be confused with Schema v2).
// This shouldn't be a problem, since the second version of the schema isn't due until the summer
// and registries that support it are supposed to use HTTP Accept headers to negotiate with clients.
// c.f. https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md
// and https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md
//
// metadataForImage(
//   "http://artifactory.otenv.com/artifactory/api/docker/docker-v2/v2",
//   "demo-server",
//   "demo-server-0.7.3-SNAPSHOT-20160329_202654_teamcity-unconfigured"
// )
// ( which returns an empty map, since the demo-server doesn't have labels... )
func (c *liveClient) metadataForImage(regHost string, ref reference.Named, etag string) (md Metadata, err error) {
	// slightly weird but: a non-empty etag implies that we've seen this
	// digest-named container before - and a digest reference should be
	// immutable.
	if _, ok := ref.(reference.Digested); ok && etag != "" {
		return Metadata{}, distribution.ErrManifestNotModified
	}

	rep, err := c.registryForHostname(regHost)
	if err != nil {
		return
	}

	mani, dg, headers, err := rep.getManifestWithEtag(c.ctx, ref, etag)
	if err != nil {
		return
	}

	md = Metadata{
		Registry: regHost,
		AllNames: make([]string, 2),
		Labels:   make(map[string]string),
		Etag:     headers.Get("Etag"),
	}
	md.AllNames[0] = ref.String()
	md.CanonicalName = ref.Name() + "@" + dg.String()
	md.AllNames[1] = md.CanonicalName

	switch mani := mani.(type) {
	case *schema1.SignedManifest:
		history := mani.History
		for _, v1 := range history {
			var historyEntry V1Schema
			if err = json.Unmarshal([]byte(v1.V1Compatibility), &historyEntry); err != nil {
				return
			}
			histLabels := historyEntry.CC.Labels
			// XXX It's unclear from the docker spec which order the labels appear in.
			// It may be that this is the wrong order to merge labels in -
			// and I have the dim recollection that the order may change between schema 1 vs. 2
			for k, v := range histLabels {
				md.Labels[k] = v
			}
		}
	case *schema2.DeserializedManifest:
		var cj []byte
		cj, err = rep.getBlob(c.ctx, ref, mani.Config.Digest)
		if err != nil {
			return
		}
		var c stubConfig
		err = json.Unmarshal(cj, &c)
		if err != nil {
			return
		}
		md.Labels = c.Config.Labels
	default:
		// We shouldn't receive any other manifest type, because we don't send
		// an Accept header that would request one. Schema2 manifests include a
		// 'config' key whose digest points at a blob - see
		// distribution/pull_v2 pullSchema2ImageConfig() (~ ln 677)
		err = fmt.Errorf("unknown manifest format: %T", mani)
	}
	return
}
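// The V1Schema and stubConfig types used by metadataForImage are not shown
// in this section. A plausible minimal shape, inferred from the fields the
// function reads (historyEntry.CC.Labels and c.Config.Labels) and from the
// schema1/schema2 JSON layouts, is sketched here as an assumption.
type V1SchemaSketch struct {
	// CC mirrors the "container_config" object embedded in each schema1
	// V1Compatibility history entry.
	CC struct {
		Labels map[string]string `json:"Labels"`
	} `json:"container_config"`
}

type stubConfigSketch struct {
	// Config mirrors the "config" object of the image configuration blob
	// that a schema2 manifest's Config.Digest points at.
	Config struct {
		Labels map[string]string `json:"Labels"`
	} `json:"config"`
}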
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	tagOrDigest := ""
	if tagged, isTagged := ref.(reference.Tagged); isTagged {
		tagOrDigest = tagged.Tag()
	} else if digested, isDigested := ref.(reference.Digested); isDigested {
		tagOrDigest = digested.Digest().String()
	} else {
		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String())
	}

	logrus.Debugf("Pulling ref from V2 registry: %q", tagOrDigest)

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return false, err
	}

	unverifiedManifest, err := manSvc.GetByTag(tagOrDigest)
	if err != nil {
		return false, err
	}
	if unverifiedManifest == nil {
		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)
	}
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifyManifest(unverifiedManifest, ref)
	if err != nil {
		return false, err
	}

	rootFS := image.NewRootFS()

	if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil {
		return false, err
	}

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)
	if err != nil {
		return false, err
	}

	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Name())

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		}
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {
			return false, err
		}

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)
		if err != nil {
			return false, err
		}
		history = append(history, h)

		if throwAway.ThrowAway {
			continue
		}

		layerDescriptor := &v2LayerDescriptor{
			digest:         blobSum,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		return false, err
	}
	defer release()

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)
	if err != nil {
		return false, err
	}

	imageID, err := p.config.ImageStore.Create(config)
	if err != nil {
		return false, err
	}

	manifestDigest, _, err := digestFromManifest(unverifiedManifest, p.repoInfo.LocalName.Name())
	if err != nil {
		return false, err
	}

	if manifestDigest != "" {
		progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())
	}

	oldTagImageID, err := p.config.TagStore.Get(ref)
	if err == nil && oldTagImageID == imageID {
		return false, nil
	}

	if canonical, ok := ref.(reference.Canonical); ok {
		if err = p.config.TagStore.AddDigest(canonical, imageID, true); err != nil {
			return false, err
		}
	} else if err = p.config.TagStore.AddTag(ref, imageID, true); err != nil {
		return false, err
	}

	return true, nil
}
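// Minimal sketch of the "throwaway" marker handled by both pullV2Tag
// variants above: schema1 V1Compatibility entries for empty layers carry
// "throwaway": true, and such entries contribute image history but no
// layer download. This helper is an illustration, not part of the puller.
func isThrowAwayLayer(v1Compatibility string) (bool, error) {
	var throwAway struct {
		ThrowAway bool `json:"throwaway,omitempty"`
	}
	if err := json.Unmarshal([]byte(v1Compatibility), &throwAway); err != nil {
		return false, err
	}
	return throwAway.ThrowAway, nil
}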