func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filter := r.Form.Get("filters") netFilters, err := filters.FromParam(filter) if err != nil { return err } if netFilters.Len() != 0 { if err := netFilters.Validate(acceptedFilters); err != nil { return err } } list := []*types.NetworkResource{} nwList := n.backend.GetAllNetworks() displayable, err := filterNetworks(nwList, netFilters) if err != nil { return err } for _, nw := range displayable { list = append(list, buildNetworkResource(nw)) } return httputils.WriteJSON(w, http.StatusOK, list) }
// getNetworksList writes the JSON list of networks, merging the networks
// reported by the cluster manager with those known to the local daemon,
// then applying the "filters" form parameter.
func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}
	filter := r.Form.Get("filters")
	netFilters, err := filters.FromParam(filter)
	if err != nil {
		return err
	}
	list := []types.NetworkResource{}
	// Cluster-managed networks are added first; a failure from the cluster
	// provider is deliberately ignored (best-effort merge).
	if nr, err := n.clusterProvider.GetNetworks(); err == nil {
		list = append(list, nr...)
	}
	// Combine the network list returned by Docker daemon if it is not already
	// returned by the cluster manager
SKIP:
	for _, nw := range n.backend.GetNetworks() {
		for _, nl := range list {
			if nl.ID == nw.ID() {
				// Duplicate: the labeled continue skips appending this
				// daemon network and moves to the next one.
				continue SKIP
			}
		}
		list = append(list, *n.buildNetworkResource(nw))
	}
	list, err = filterNetworks(list, netFilters)
	if err != nil {
		return err
	}
	return httputils.WriteJSON(w, http.StatusOK, list)
}
// Volumes lists known volumes, using the filter to restrict the range // of volumes returned. func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { var ( volumesOut []*types.Volume ) volFilters, err := filters.FromParam(filter) if err != nil { return nil, nil, err } if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { return nil, nil, err } volumes, warnings, err := daemon.volumes.List() if err != nil { return nil, nil, err } filterVolumes, err := daemon.filterVolumes(volumes, volFilters) if err != nil { return nil, nil, err } for _, v := range filterVolumes { apiV := volumeToAPIType(v) if vv, ok := v.(interface { CachedPath() string }); ok { apiV.Mountpoint = vv.CachedPath() } else { apiV.Mountpoint = v.Path() } volumesOut = append(volumesOut, apiV) } return volumesOut, warnings, nil }
func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filter, err := filters.FromParam(r.Form.Get("filters")) if err != nil { return err } config := &types.ContainerListOptions{ All: httputils.BoolValue(r, "all"), Size: httputils.BoolValue(r, "size"), Since: r.Form.Get("since"), Before: r.Form.Get("before"), Filters: filter, } if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { limit, err := strconv.Atoi(tmpLimit) if err != nil { return err } config.Limit = limit } containers, err := s.backend.Containers(config) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, containers) }
func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } pruneFilters, err := filters.FromParam(r.Form.Get("filters")) if err != nil { return err } pruneReport, err := s.backend.ContainersPrune(pruneFilters) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, pruneReport) }
func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filters, err := filters.FromParam(r.Form.Get("filters")) if err != nil { return err } secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, secrets) }
func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filter, err := filters.FromParam(r.Form.Get("filters")) if err != nil { return err } tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) if err != nil { logrus.Errorf("Error getting tasks: %v", err) return err } return httputils.WriteJSON(w, http.StatusOK, tasks) }
// Volumes lists known volumes, using the filter to restrict the range // of volumes returned. func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, error) { var volumesOut []*types.Volume volFilters, err := filters.FromParam(filter) if err != nil { return nil, err } filterUsed := volFilters.Include("dangling") && (volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1")) volumes := daemon.volumes.List() for _, v := range volumes { if filterUsed && daemon.volumes.Count(v) > 0 { continue } volumesOut = append(volumesOut, volumeToAPIType(v)) } return volumesOut, nil }
// Volumes lists known volumes, using the filter to restrict the range // of volumes returned. func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { var volumesOut []*types.Volume volFilters, err := filters.FromParam(filter) if err != nil { return nil, nil, err } filterUsed := volFilters.Include("dangling") && (volFilters.ExactMatch("dangling", "true") || volFilters.ExactMatch("dangling", "1")) volumes, warnings, err := daemon.volumes.List() if err != nil { return nil, nil, err } if filterUsed { volumes = daemon.volumes.FilterByUsed(volumes) } for _, v := range volumes { volumesOut = append(volumesOut, volumeToAPIType(v)) } return volumesOut, warnings, nil }
func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } imageFilters, err := filters.FromParam(r.Form.Get("filters")) if err != nil { return err } version := httputils.VersionFromContext(ctx) filterParam := r.Form.Get("filter") if versions.LessThan(version, "1.28") && filterParam != "" { imageFilters.Add("reference", filterParam) } images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) if err != nil { return err } return httputils.WriteJSON(w, http.StatusOK, images) }
// GET /networks func getNetworks(c *context, w http.ResponseWriter, r *http.Request) { if err := r.ParseForm(); err != nil { httpError(w, err.Error(), http.StatusInternalServerError) return } filters, err := dockerfilters.FromParam(r.Form.Get("filters")) if err != nil { httpError(w, err.Error(), http.StatusBadRequest) return } out := []*dockerclient.NetworkResource{} networks := c.cluster.Networks().Filter(filters.Get("name"), filters.Get("id")) for _, network := range networks { tmp := (*network).NetworkResource if tmp.Scope == "local" { tmp.Name = network.Engine.Name + "/" + network.Name } out = append(out, &tmp) } w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(out) }
func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { if err := httputils.ParseForm(r); err != nil { return err } filter := r.Form.Get("filters") netFilters, err := filters.FromParam(filter) if err != nil { return err } list := []*types.NetworkResource{} netFilters.WalkValues("name", func(name string) error { if nw, err := n.backend.GetNetwork(name, daemon.NetworkByName); err == nil { list = append(list, buildNetworkResource(nw)) } else { logrus.Errorf("failed to get network for filter=%s : %v", name, err) } return nil }) netFilters.WalkValues("id", func(id string) error { for _, nw := range n.backend.GetNetworksByID(id) { list = append(list, buildNetworkResource(nw)) } return nil }) if !netFilters.Include("name") && !netFilters.Include("id") { nwList := n.backend.GetNetworksByID("") for _, nw := range nwList { list = append(list, buildNetworkResource(nw)) } } return httputils.WriteJSON(w, http.StatusOK, list) }
// getEvents streams daemon events to the client as newline-delimited JSON.
// It first replays events buffered between "since" and now, then keeps
// streaming live events until "until" elapses or the client disconnects.
func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}
	since, sinceNano, err := timetypes.ParseTimestamps(r.Form.Get("since"), -1)
	if err != nil {
		return err
	}
	until, untilNano, err := timetypes.ParseTimestamps(r.Form.Get("until"), -1)
	if err != nil {
		return err
	}
	// A stopped zero timer never fires, so with no "until" the stream runs
	// until the client goes away.
	timer := time.NewTimer(0)
	timer.Stop()
	if until > 0 || untilNano > 0 {
		dur := time.Unix(until, untilNano).Sub(time.Now())
		timer = time.NewTimer(dur)
	}
	ef, err := filters.FromParam(r.Form.Get("filters"))
	if err != nil {
		return err
	}
	w.Header().Set("Content-Type", "application/json")
	// This is to ensure that the HTTP status code is sent immediately,
	// so that it will not block the receiver.
	w.WriteHeader(http.StatusOK)
	if flusher, ok := w.(http.Flusher); ok {
		flusher.Flush()
	}
	output := ioutils.NewWriteFlusher(w)
	defer output.Close()
	enc := json.NewEncoder(output)
	// Subscribe before streaming so no event is missed between replaying
	// the buffer and entering the live loop.
	buffered, l := s.backend.SubscribeToEvents(since, sinceNano, ef)
	defer s.backend.UnsubscribeFromEvents(l)
	// Replay buffered (historical) events first.
	for _, ev := range buffered {
		if err := enc.Encode(ev); err != nil {
			return err
		}
	}
	// closeNotify stays nil (blocking forever in select) when the writer
	// does not implement http.CloseNotifier.
	var closeNotify <-chan bool
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		closeNotify = closeNotifier.CloseNotify()
	}
	for {
		select {
		case ev := <-l:
			// Only *jsonmessage.JSONMessage events are forwarded; anything
			// else on the channel is silently dropped.
			jev, ok := ev.(*jsonmessage.JSONMessage)
			if !ok {
				continue
			}
			if err := enc.Encode(jev); err != nil {
				return err
			}
		case <-timer.C:
			// "until" deadline reached.
			return nil
		case <-closeNotify:
			logrus.Debug("Client disconnected, stop sending events")
			return nil
		}
	}
}
// GET /images/json
//
// getImagesJSON returns the cluster's images as JSON. Images with the same
// Id that appear on multiple engines are merged into a single entry whose
// RepoTags/RepoDigests are the de-duplicated union.
func getImagesJSON(c *context, w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	filters, err := dockerfilters.FromParam(r.Form.Get("filters"))
	if err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// TODO: apply node filter in engine?
	// The "node" filter is applied locally, after the cluster-wide query.
	accepteds := filters.Get("node")
	// this struct helps grouping images
	// but still keeps their Engine infos as an array.
	groupImages := make(map[string]dockerclient.Image)
	opts := cluster.ImageFilterOptions{
		All:        boolValue(r, "all"),
		NameFilter: r.FormValue("filter"),
		Filters:    filters,
	}
	for _, image := range c.cluster.Images().Filter(opts) {
		// Keep only images whose engine name or ID matches a "node" value
		// (when any "node" values were supplied).
		if len(accepteds) != 0 {
			found := false
			for _, accepted := range accepteds {
				if accepted == image.Engine.Name || accepted == image.Engine.ID {
					found = true
					break
				}
			}
			if !found {
				continue
			}
		}
		// grouping images by Id, and concat their RepoTags
		if entry, existed := groupImages[image.Id]; existed {
			entry.RepoTags = append(entry.RepoTags, image.RepoTags...)
			entry.RepoDigests = append(entry.RepoDigests, image.RepoDigests...)
			groupImages[image.Id] = entry
		} else {
			groupImages[image.Id] = image.Image
		}
	}

	images := []dockerclient.Image{}
	for _, image := range groupImages {
		// de-duplicate RepoTags
		result := []string{}
		seen := map[string]bool{}
		for _, val := range image.RepoTags {
			if _, ok := seen[val]; !ok {
				result = append(result, val)
				seen[val] = true
			}
		}
		image.RepoTags = result

		// de-duplicate RepoDigests
		result = []string{}
		seen = map[string]bool{}
		for _, val := range image.RepoDigests {
			if _, ok := seen[val]; !ok {
				result = append(result, val)
				seen[val] = true
			}
		}
		image.RepoDigests = result

		images = append(images, image)
	}
	sort.Sort(sort.Reverse(ImageSorter(images)))
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(images)
}
// GET /containers/ps
// GET /containers/json
//
// getContainersJSON lists the cluster's containers, applying the standard
// ps flags (all/limit/before) and filters (name, id, label, status,
// exited), then rewrites engine-specific fields (status, names, ports)
// before encoding the result.
func getContainersJSON(c *context, w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Parse flags.
	var (
		all    = boolValue(r, "all")
		limit  = intValueOrZero(r, "limit")
		before *cluster.Container
	)
	if value := r.FormValue("before"); value != "" {
		before = c.cluster.Container(value)
		if before == nil {
			httpError(w, fmt.Sprintf("No such container %s", value), http.StatusNotFound)
			return
		}
	}

	// Parse filters.
	filters, err := dockerfilters.FromParam(r.Form.Get("filters"))
	if err != nil {
		httpError(w, err.Error(), http.StatusInternalServerError)
		return
	}
	filtExited := []int{}
	for _, value := range filters.Get("exited") {
		code, err := strconv.Atoi(value)
		if err != nil {
			httpError(w, err.Error(), http.StatusInternalServerError)
			return
		}
		filtExited = append(filtExited, code)
	}
	// status=exited implies -a: stopped containers must be considered.
	for _, value := range filters.Get("status") {
		if value == "exited" {
			all = true
		}
	}

	// Filtering: select the containers we want to return.
	candidates := []*cluster.Container{}
	for _, container := range c.cluster.Containers() {
		// Skip stopped containers unless -a was specified.
		if !container.Info.State.Running && !all && before == nil && limit <= 0 {
			continue
		}
		// Skip swarm containers unless -a was specified.
		if strings.Split(container.Image, ":")[0] == "swarm" && !all {
			continue
		}
		// Apply filters.
		if len(container.Names) > 0 {
			if !filters.Match("name", strings.TrimPrefix(container.Names[0], "/")) {
				continue
			}
		} else if len(filters.Get("name")) > 0 {
			// Nameless container cannot match a name filter.
			continue
		}
		if !filters.Match("id", container.Id) {
			continue
		}
		if !filters.MatchKVList("label", container.Config.Labels) {
			continue
		}
		if !filters.Match("status", container.Info.State.StateString()) {
			continue
		}
		// exited filter: keep only stopped containers whose exit code is
		// one of the requested values.
		if len(filtExited) > 0 {
			shouldSkip := true
			for _, code := range filtExited {
				if code == container.Info.State.ExitCode && !container.Info.State.Running {
					shouldSkip = false
					break
				}
			}
			if shouldSkip {
				continue
			}
		}
		candidates = append(candidates, container)
	}

	// Sort the candidates and apply limits.
	sort.Sort(sort.Reverse(ContainerSorter(candidates)))
	if limit > 0 && limit < len(candidates) {
		candidates = candidates[:limit]
	}

	// Convert cluster.Container back into dockerclient.Container.
	out := []*dockerclient.Container{}
	for _, container := range candidates {
		// "before" handling: candidates are skipped up to and including the
		// "before" container; only entries after it are emitted.
		if before != nil {
			if container.Id == before.Id {
				before = nil
			}
			continue
		}
		// Create a copy of the underlying dockerclient.Container so we can
		// make changes without messing with cluster.Container.
		tmp := (*container).Container
		// Update the Status. The one we have is stale from the last `docker ps` the engine sent.
		// `Status()` will generate a new one
		tmp.Status = container.Info.State.String()
		if !container.Engine.IsHealthy() {
			tmp.Status = "Host Down"
		}
		// Overwrite labels with the ones we have in the config.
		// This ensures that we can freely manipulate them in the codebase and
		// they will be properly exported back (for instance Swarm IDs).
		tmp.Labels = container.Config.Labels
		// TODO remove the Node Name in the name when we have a good solution
		tmp.Names = make([]string, len(container.Names))
		for i, name := range container.Names {
			tmp.Names[i] = "/" + container.Engine.Name + name
		}
		// insert node IP
		tmp.Ports = make([]dockerclient.Port, len(container.Ports))
		for i, port := range container.Ports {
			tmp.Ports[i] = port
			if port.IP == "0.0.0.0" {
				tmp.Ports[i].IP = container.Engine.IP
			}
		}
		out = append(out, &tmp)
	}

	// Finally, send them back to the CLI.
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(out)
}
// getEvents streams daemon events to the client as newline-delimited JSON.
// Buffered events between "since" and "until" are replayed first; when
// "until" is in the future (or absent) the stream continues with live
// events until the deadline passes or the request context is cancelled.
func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
	if err := httputils.ParseForm(r); err != nil {
		return err
	}

	since, err := eventTime(r.Form.Get("since"))
	if err != nil {
		return err
	}
	until, err := eventTime(r.Form.Get("until"))
	if err != nil {
		return err
	}

	var (
		timeout        <-chan time.Time
		onlyPastEvents bool
	)
	if !until.IsZero() {
		if until.Before(since) {
			return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until")))
		}

		now := time.Now()

		// If "until" already passed, only the buffered events are sent and
		// the live loop is skipped entirely.
		onlyPastEvents = until.Before(now)

		if !onlyPastEvents {
			dur := until.Sub(now)
			timeout = time.NewTimer(dur).C
		}
	}

	ef, err := filters.FromParam(r.Form.Get("filters"))
	if err != nil {
		return err
	}

	w.Header().Set("Content-Type", "application/json")
	output := ioutils.NewWriteFlusher(w)
	defer output.Close()
	// Flush immediately so headers reach the client before any event does.
	output.Flush()

	enc := json.NewEncoder(output)

	// Subscribe before streaming so no event is missed between replaying
	// the buffer and entering the live loop.
	buffered, l := s.backend.SubscribeToEvents(since, until, ef)
	defer s.backend.UnsubscribeFromEvents(l)

	for _, ev := range buffered {
		if err := enc.Encode(ev); err != nil {
			return err
		}
	}

	if onlyPastEvents {
		return nil
	}

	for {
		select {
		case ev := <-l:
			// Only events.Message values are forwarded; anything else is
			// logged and dropped.
			jev, ok := ev.(events.Message)
			if !ok {
				logrus.Warnf("unexpected event message: %q", ev)
				continue
			}
			if err := enc.Encode(jev); err != nil {
				return err
			}
		case <-timeout:
			// "until" deadline reached.
			return nil
		case <-ctx.Done():
			logrus.Debug("Client context cancelled, stop sending events")
			return nil
		}
	}
}
// Images returns a filtered list of images. filterArgs is a JSON-encoded set
// of filter arguments which will be interpreted by api/types/filters.
// filter is a shell glob string applied to repository names. The argument
// named all controls whether all images in the graph are filtered, or just
// the heads. withExtraAttrs additionally computes per-image container
// counts and shared/unique layer sizes.
func (daemon *Daemon) Images(filterArgs, filter string, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)
	imageFilters, err := filters.FromParam(filterArgs)
	if err != nil {
		return nil, err
	}
	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}
	// dangling accepts only "true" or "false"; anything else is rejected.
	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}

	// Resolve before/since filter values to images up front; their Created
	// timestamps bound the results below.
	var beforeFilter, sinceFilter *image.Image
	err = imageFilters.WalkValues("before", func(value string) error {
		beforeFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}
	err = imageFilters.WalkValues("since", func(value string) error {
		sinceFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	images := []*types.ImageSummary{}
	// Lazily initialized only when withExtraAttrs is set (see below).
	var imagesMap map[*image.Image]*types.ImageSummary
	var layerRefs map[layer.ChainID]int
	var allLayers map[layer.ChainID]layer.Layer
	var allContainers []*container.Container

	var filterTagged bool
	if filter != "" {
		filterRef, err := reference.ParseNamed(filter)
		if err == nil { // parse error means wildcard repo
			if _, ok := filterRef.(reference.NamedTagged); ok {
				filterTagged = true
			}
		}
	}

	for id, img := range allImages {
		// before: keep only images created strictly before the reference.
		if beforeFilter != nil {
			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
				continue
			}
		}
		// since: keep only images created strictly after the reference.
		if sinceFilter != nil {
			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
				continue
			}
		}

		if imageFilters.Include("label") {
			// Very old image that do not have image.Config (or even labels)
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}

		// Compute the image's size from its top layer; the layer reference
		// is released immediately after.
		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}
			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)

		for _, ref := range daemon.referenceStore.References(id.Digest()) {
			if filter != "" { // filter by tag/repo name
				if filterTagged { // filter by tag, require full ref match
					if ref.String() != filter {
						continue
					}
				} else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact
					continue
				}
			}
			if _, ok := ref.(reference.Canonical); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, ref.String())
			}
			if _, ok := ref.(reference.NamedTagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, ref.String())
			}
		}

		// Untagged image: include it (as <none>) only when listing all
		// images or when it has no children, and not when dangling=false.
		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {
				if imageFilters.Include("dangling") && !danglingOnly {
					//dangling=false case, so dangling image is not needed
					continue
				}
				if filter != "" { // skip images with no references if filtering by tag
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly && len(newImage.RepoTags) > 0 {
			continue
		}

		if withExtraAttrs {
			// lazyly init variables
			if imagesMap == nil {
				allContainers = daemon.List()
				allLayers = daemon.layerStore.Map()
				imagesMap = make(map[*image.Image]*types.ImageSummary)
				layerRefs = make(map[layer.ChainID]int)
			}

			// Get container count
			newImage.Containers = 0
			for _, c := range allContainers {
				if c.ImageID == id {
					newImage.Containers++
				}
			}

			// count layer references
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				layerRefs[chid]++
				if _, ok := allLayers[chid]; !ok {
					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
				}
			}
			imagesMap[img] = newImage
		}

		images = append(images, newImage)
	}

	if withExtraAttrs {
		// Get Shared and Unique sizes
		// A layer referenced by more than one image counts as shared.
		for img, newImage := range imagesMap {
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil

			newImage.Size = 0
			newImage.SharedSize = 0
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				diffSize, err := allLayers[chid].DiffSize()
				if err != nil {
					return nil, err
				}
				if layerRefs[chid] > 1 {
					newImage.SharedSize += diffSize
				} else {
					newImage.Size += diffSize
				}
			}
		}
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}
// SearchRegistryForImages queries the registry for images matching // term. authConfig is used to login. func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, headers map[string][]string) (*registrytypes.SearchResults, error) { searchFilters, err := filters.FromParam(filtersArgs) if err != nil { return nil, err } if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { return nil, err } var isAutomated, isOfficial bool var hasStarFilter = 0 if searchFilters.Include("is-automated") { if searchFilters.UniqueExactMatch("is-automated", "true") { isAutomated = true } else if !searchFilters.UniqueExactMatch("is-automated", "false") { return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) } } if searchFilters.Include("is-official") { if searchFilters.UniqueExactMatch("is-official", "true") { isOfficial = true } else if !searchFilters.UniqueExactMatch("is-official", "false") { return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) } } if searchFilters.Include("stars") { hasStars := searchFilters.Get("stars") for _, hasStar := range hasStars { iHasStar, err := strconv.Atoi(hasStar) if err != nil { return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) } if iHasStar > hasStarFilter { hasStarFilter = iHasStar } } } unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) if err != nil { return nil, err } filteredResults := []registrytypes.SearchResult{} for _, result := range unfilteredResult.Results { if searchFilters.Include("is-automated") { if isAutomated != result.IsAutomated { continue } } if searchFilters.Include("is-official") { if isOfficial != result.IsOfficial { continue } } if searchFilters.Include("stars") { if result.StarCount < hasStarFilter { continue } } filteredResults = append(filteredResults, result) } 
return ®istrytypes.SearchResults{ Query: unfilteredResult.Query, NumResults: len(filteredResults), Results: filteredResults, }, nil }
// Images returns a filtered list of images. filterArgs is a JSON-encoded set
// of filter arguments which will be interpreted by api/types/filters.
// filter is a shell glob string applied to repository names. The argument
// named all controls whether all images in the graph are filtered, or just
// the heads.
func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)
	imageFilters, err := filters.FromParam(filterArgs)
	if err != nil {
		return nil, err
	}
	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}
	// dangling accepts only "true" or "false"; anything else is rejected.
	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}

	images := []*types.Image{}

	var filterTagged bool
	if filter != "" {
		filterRef, err := reference.Parse(filter)
		if err == nil { // parse error means wildcard repo
			if _, ok := filterRef.(reference.Tagged); ok {
				filterTagged = true
			}
		}
	}

	for id, img := range allImages {
		if imageFilters.Include("label") {
			// Very old image that do not have image.Config (or even labels)
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}

		// Compute the image's size from its top layer; the layer reference
		// is released immediately after.
		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}
			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)

		for _, ref := range daemon.tagStore.References(id) {
			if filter != "" { // filter by tag/repo name
				if filterTagged { // filter by tag, require full ref match
					if ref.String() != filter {
						continue
					}
				} else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact
					continue
				}
			}
			if _, ok := ref.(reference.Digested); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, ref.String())
			}
			if _, ok := ref.(reference.Tagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, ref.String())
			}
		}

		// Untagged image: include it (as <none>) only when listing all
		// images or when it has no children.
		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {
				if filter != "" { // skip images with no references if filtering by tag
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly {
			continue
		}

		images = append(images, newImage)
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}
// foldFilter generates the container filter based in the user's filtering options.
// It resolves the exited/status/before/since/ancestor filters and the
// container-name graph into a listContext used by the list code.
func (daemon *Daemon) foldFilter(config *ContainersConfig) (*listContext, error) {
	psFilters, err := filters.FromParam(config.Filters)
	if err != nil {
		return nil, err
	}

	// exited: collect the requested exit codes.
	var filtExited []int
	err = psFilters.WalkValues("exited", func(value string) error {
		code, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		filtExited = append(filtExited, code)
		return nil
	})
	if err != nil {
		return nil, err
	}

	// status: validate each value; any status filter forces All so stopped
	// containers are considered.
	err = psFilters.WalkValues("status", func(value string) error {
		if !container.IsValidStateString(value) {
			return fmt.Errorf("Unrecognised filter value for status: %s", value)
		}
		config.All = true
		return nil
	})
	if err != nil {
		return nil, err
	}

	// before/since: resolve the filter values to containers.
	var beforeContFilter, sinceContFilter *container.Container
	err = psFilters.WalkValues("before", func(value string) error {
		beforeContFilter, err = daemon.GetContainer(value)
		return err
	})
	if err != nil {
		return nil, err
	}
	err = psFilters.WalkValues("since", func(value string) error {
		sinceContFilter, err = daemon.GetContainer(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	// ancestor: expand each ancestor image into the set of its descendant
	// image IDs. Unresolvable ancestors are logged and skipped.
	imagesFilter := map[image.ID]bool{}
	var ancestorFilter bool
	if psFilters.Include("ancestor") {
		ancestorFilter = true
		psFilters.WalkValues("ancestor", func(ancestor string) error {
			id, err := daemon.GetImageID(ancestor)
			if err != nil {
				logrus.Warnf("Error while looking up for image %v", ancestor)
				return nil
			}
			if imagesFilter[id] {
				// Already seen this ancestor, skip it
				return nil
			}
			// Then walk down the graph and put the imageIds in imagesFilter
			populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children)
			return nil
		})
	}

	// Build the id -> names map from the container graph (depth 1).
	names := make(map[string][]string)
	daemon.containerGraph().Walk("/", func(p string, e *graphdb.Entity) error {
		names[e.ID()] = append(names[e.ID()], p)
		return nil
	}, 1)

	// Legacy Before/Since config fields apply only when the corresponding
	// filter did not already resolve a container.
	if config.Before != "" && beforeContFilter == nil {
		beforeContFilter, err = daemon.GetContainer(config.Before)
		if err != nil {
			return nil, err
		}
	}

	if config.Since != "" && sinceContFilter == nil {
		sinceContFilter, err = daemon.GetContainer(config.Since)
		if err != nil {
			return nil, err
		}
	}

	return &listContext{
		filters:          psFilters,
		ancestorFilter:   ancestorFilter,
		names:            names,
		images:           imagesFilter,
		exitAllowed:      filtExited,
		beforeFilter:     beforeContFilter,
		sinceFilter:      sinceContFilter,
		ContainersConfig: config,
	}, nil
}