// getFiltersQuery returns a url query with "filters" query term, based on the // filters provided. func getFiltersQuery(f filters.Args) (url.Values, error) { query := url.Values{} if f.Len() > 0 { filterJSON, err := filters.ToParam(f) if err != nil { return query, err } query.Set("filters", filterJSON) } return query, nil }
func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { accepted := map[string]bool{ "names": true, "name": true, "id": true, "label": true, } if err := filter.Validate(accepted); err != nil { return nil, err } return &swarmapi.ListSecretsRequest_Filters{ Names: filter.Get("names"), NamePrefixes: filter.Get("name"), IDPrefixes: filter.Get("id"), Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), }, nil }
// VolumeList returns the volumes configured in the docker host. func (cli *Client) VolumeList(filter filters.Args) (types.VolumesListResponse, error) { var volumes types.VolumesListResponse query := url.Values{} if filter.Len() > 0 { filterJSON, err := filters.ToParam(filter) if err != nil { return volumes, err } query.Set("filters", filterJSON) } resp, err := cli.get("/volumes", query, nil) if err != nil { return volumes, err } defer ensureReaderClosed(resp) err = json.NewDecoder(resp.body).Decode(&volumes) return volumes, err }
func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { until := time.Time{} if !pruneFilters.Include("until") { return until, nil } untilFilters := pruneFilters.Get("until") if len(untilFilters) > 1 { return until, fmt.Errorf("more than one until filter specified") } ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) if err != nil { return until, err } seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) if err != nil { return until, err } until = time.Unix(seconds, nanoseconds) return until, nil }
// VolumeList returns the volumes configured in the docker host. func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { var volumes volumetypes.VolumesListOKBody query := url.Values{} if filter.Len() > 0 { filterJSON, err := filters.ToParamWithVersion(cli.version, filter) if err != nil { return volumes, err } query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) if err != nil { return volumes, err } err = json.NewDecoder(resp.body).Decode(&volumes) ensureReaderClosed(resp) return volumes, err }
// filterNetworks filters the given network list according to the
// user-specified filter arguments and returns the matching networks.
// Each supported filter key is dispatched to its handler in supportedFilters.
// NOTE(review): matches are appended per filter value, so a network matching
// several keys/values appears multiple times in the result — verify whether
// callers deduplicate.
func filterNetworks(nws []libnetwork.Network, filter filters.Args) ([]libnetwork.Network, error) {
	// if filter is empty, return original network list
	if filter.Len() == 0 {
		return nws, nil
	}

	var displayNet []libnetwork.Network
	for fkey, fhandler := range supportedFilters {
		// Run the handler once per value supplied for this filter key.
		errFilter := filter.WalkValues(fkey, func(fval string) error {
			passList, err := fhandler(nws, fval)
			if err != nil {
				return err
			}
			displayNet = append(displayNet, passList...)
			return nil
		})
		if errFilter != nil {
			return nil, errFilter
		}
	}
	return displayNet, nil
}
// getEventFilter returns a filters.Filter for a set of filters func (daemon *Daemon) getEventFilter(filter filters.Args) *events.Filter { // incoming container filter can be name, id or partial id, convert to // a full container id for _, cn := range filter.Get("container") { c, err := daemon.GetContainer(cn) filter.Del("container", cn) if err == nil { filter.Add("container", c.ID) } } return events.NewFilter(filter, daemon.GetLabels) }
// filterVolumes filters volume list according to user specified filter // and returns user chosen volumes func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { // if filter is empty, return original volume list if filter.Len() == 0 { return vols, nil } var retVols []volume.Volume for _, vol := range vols { if filter.Include("name") { if !filter.Match("name", vol.Name()) { continue } } if filter.Include("driver") { if !filter.Match("driver", vol.DriverName()) { continue } } if filter.Include("label") { v, ok := vol.(volume.LabeledVolume) if !ok { continue } if !filter.MatchKVList("label", v.Labels()) { continue } } retVols = append(retVols, vol) } danglingOnly := false if filter.Include("dangling") { if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { danglingOnly = true } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) } retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) } return retVols, nil }
// ImagesPrune removes unused images and reports what was deleted and how
// much layer space was reclaimed. By default only dangling images are
// pruned; passing dangling=false (or 0) widens the scan to all images.
func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
	rep := &types.ImagesPruneReport{}

	// Parse the optional "dangling" filter; true/1 (the default) restricts
	// pruning to untagged images, false/0 prunes unused tagged images too.
	danglingOnly := true
	if pruneFilters.Include("dangling") {
		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
			danglingOnly = false
		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling"))
		}
	}

	var allImages map[image.ID]*image.Image
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}
	allContainers := daemon.List()
	// NOTE(review): imageRefs is keyed by container ID (c.ID) but looked up
	// below with an image digest hex — these namespaces differ, so the
	// lookup appears to never match. Used-image protection seems to rely on
	// ImageDelete(force=false) failing instead; verify intent.
	imageRefs := map[string]bool{}
	for _, c := range allContainers {
		imageRefs[c.ID] = true
	}

	// Filter intermediary images and get their unique size
	allLayers := daemon.layerStore.Map()
	topImages := map[image.ID]*image.Image{}
	for id, img := range allImages {
		dgst := digest.Digest(id)
		// Skip images that have no references but do have children: these
		// are intermediary build layers, not prunable top-level images.
		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
			continue
		}
		topImages[id] = img
	}

	for id := range topImages {
		dgst := digest.Digest(id)
		hex := dgst.Hex()
		if _, ok := imageRefs[hex]; ok {
			continue
		}

		deletedImages := []types.ImageDelete{}
		refs := daemon.referenceStore.References(dgst)
		if len(refs) > 0 {
			if danglingOnly {
				// Not a dangling image
				continue
			}

			nrRefs := len(refs)
			for _, ref := range refs {
				// If nrRefs == 1, we have an image marked as myreponame:<none>
				// i.e. the tag content was changed
				if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 {
					continue
				}
				// Best-effort delete: log and continue on failure so one
				// stuck image does not abort the whole prune.
				imgDel, err := daemon.ImageDelete(ref.String(), false, true)
				if err != nil {
					logrus.Warnf("could not delete reference %s: %v", ref.String(), err)
					continue
				}
				deletedImages = append(deletedImages, imgDel...)
			}
		} else {
			// No references at all: delete the dangling image by digest hex.
			imgDel, err := daemon.ImageDelete(hex, false, true)
			if err != nil {
				logrus.Warnf("could not delete image %s: %v", hex, err)
				continue
			}
			deletedImages = append(deletedImages, imgDel...)
		}

		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
	}

	// Compute how much space was freed
	for _, d := range rep.ImagesDeleted {
		if d.Deleted != "" {
			chid := layer.ChainID(d.Deleted)
			if l, ok := allLayers[chid]; ok {
				diffSize, err := l.DiffSize()
				if err != nil {
					logrus.Warnf("failed to get layer %s size: %v", chid, err)
					continue
				}
				rep.SpaceReclaimed += uint64(diffSize)
			}
		}
	}

	return rep, nil
}
// Images returns a filtered list of images. filterArgs is a JSON-encoded set
// of filter arguments which will be interpreted by api/types/filters.
// filter is a shell glob string applied to repository names. The argument
// named all controls whether all images in the graph are filtered, or just
// the heads.
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)

	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}

	// "dangling" accepts only true/false here (no 1/0 aliases).
	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}

	// Resolve the optional before/since reference images; the last value
	// supplied for each key wins.
	var beforeFilter, sinceFilter *image.Image
	err = imageFilters.WalkValues("before", func(value string) error {
		beforeFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	err = imageFilters.WalkValues("since", func(value string) error {
		sinceFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	images := []*types.ImageSummary{}
	// Lazily initialized only when withExtraAttrs is set (see below).
	var imagesMap map[*image.Image]*types.ImageSummary
	var layerRefs map[layer.ChainID]int
	var allLayers map[layer.ChainID]layer.Layer
	var allContainers []*container.Container

	for id, img := range allImages {
		// before/since compare creation timestamps (exclusive bounds).
		if beforeFilter != nil {
			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
				continue
			}
		}

		if sinceFilter != nil {
			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
				continue
			}
		}

		if imageFilters.Include("label") {
			// Very old image that do not have image.Config (or even labels)
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}

		// Compute the image's size from its top layer, if it has one.
		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}

			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)

		for _, ref := range daemon.referenceStore.References(id.Digest()) {
			if imageFilters.Include("reference") {
				var found bool
				var matchErr error
				// NOTE(review): found is overwritten each iteration, so only
				// the LAST pattern's result is kept — a ref matching an
				// earlier pattern but not the last is skipped. Verify whether
				// this should break on first match.
				for _, pattern := range imageFilters.Get("reference") {
					found, matchErr = reference.Match(pattern, ref)
					if matchErr != nil {
						return nil, matchErr
					}
				}
				if !found {
					continue
				}
			}
			if _, ok := ref.(reference.Canonical); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, ref.String())
			}
			if _, ok := ref.(reference.NamedTagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, ref.String())
			}
		}

		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {
				if imageFilters.Include("dangling") && !danglingOnly {
					//dangling=false case, so dangling image is not needed
					continue
				}
				if imageFilters.Include("reference") {
					// skip images with no references if filtering by reference
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly && len(newImage.RepoTags) > 0 {
			continue
		}

		if withExtraAttrs {
			// Lazily init variables on the first image that needs them.
			if imagesMap == nil {
				allContainers = daemon.List()
				allLayers = daemon.layerStore.Map()
				imagesMap = make(map[*image.Image]*types.ImageSummary)
				layerRefs = make(map[layer.ChainID]int)
			}

			// Get container count
			newImage.Containers = 0
			for _, c := range allContainers {
				if c.ImageID == id {
					newImage.Containers++
				}
			}

			// count layer references
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				layerRefs[chid]++
				if _, ok := allLayers[chid]; !ok {
					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
				}
			}
			imagesMap[img] = newImage
		}

		images = append(images, newImage)
	}

	if withExtraAttrs {
		// Get Shared and Unique sizes
		for img, newImage := range imagesMap {
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil

			newImage.SharedSize = 0
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()

				diffSize, err := allLayers[chid].DiffSize()
				if err != nil {
					return nil, err
				}

				// A layer referenced by more than one image counts as shared.
				if layerRefs[chid] > 1 {
					newImage.SharedSize += diffSize
				}
			}
		}
	}

	// Newest images first.
	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}
func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { accepted := map[string]bool{ "name": true, "id": true, "label": true, "service": true, "node": true, "desired-state": true, } if err := filter.Validate(accepted); err != nil { return nil, err } if transformFunc != nil { if err := transformFunc(filter); err != nil { return nil, err } } f := &swarmapi.ListTasksRequest_Filters{ NamePrefixes: filter.Get("name"), IDPrefixes: filter.Get("id"), Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), ServiceIDs: filter.Get("service"), NodeIDs: filter.Get("node"), } for _, s := range filter.Get("desired-state") { if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) } else if s != "" { return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) } } return f, nil }
func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { accepted := map[string]bool{ "name": true, "id": true, "label": true, "role": true, "membership": true, } if err := filter.Validate(accepted); err != nil { return nil, err } f := &swarmapi.ListNodesRequest_Filters{ NamePrefixes: filter.Get("name"), IDPrefixes: filter.Get("id"), Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), } for _, r := range filter.Get("role") { if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { f.Roles = append(f.Roles, swarmapi.NodeRole(role)) } else if r != "" { return nil, fmt.Errorf("Invalid role filter: '%s'", r) } } for _, a := range filter.Get("membership") { if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) } else if a != "" { return nil, fmt.Errorf("Invalid membership filter: '%s'", a) } } return f, nil }
// filterNetworks filters network list according to user specified filter // and returns user chosen networks func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { // if filter is empty, return original network list if filter.Len() == 0 { return nws, nil } if err := filter.Validate(acceptedNetworkFilters); err != nil { return nil, err } displayNet := []types.NetworkResource{} for _, nw := range nws { if filter.Include("driver") { if !filter.ExactMatch("driver", nw.Driver) { continue } } if filter.Include("name") { if !filter.Match("name", nw.Name) { continue } } if filter.Include("id") { if !filter.Match("id", nw.ID) { continue } } if filter.Include("label") { if !filter.MatchKVList("label", nw.Labels) { continue } } displayNet = append(displayNet, nw) } if filter.Include("type") { var typeNet []types.NetworkResource errFilter := filter.WalkValues("type", func(fval string) error { passList, err := filterNetworkByType(displayNet, fval) if err != nil { return err } typeNet = append(typeNet, passList...) return nil }) if errFilter != nil { return nil, errFilter } displayNet = typeNet } return displayNet, nil }