Example #1
// filterNetworks filters the network list according to the user-specified filter
// and returns the matching networks.
func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) {
	// if filter is empty, return original network list
	if filter.Len() == 0 {
		return nws, nil
	}

	if err := filter.Validate(acceptedNetworkFilters); err != nil {
		return nil, err
	}

	displayNet := []types.NetworkResource{}
	for _, nw := range nws {
		if filter.Include("driver") {
			if !filter.ExactMatch("driver", nw.Driver) {
				continue
			}
		}
		if filter.Include("name") {
			if !filter.Match("name", nw.Name) {
				continue
			}
		}
		if filter.Include("id") {
			if !filter.Match("id", nw.ID) {
				continue
			}
		}
		if filter.Include("label") {
			if !filter.MatchKVList("label", nw.Labels) {
				continue
			}
		}
		displayNet = append(displayNet, nw)
	}

	if filter.Include("type") {
		var typeNet []types.NetworkResource
		errFilter := filter.WalkValues("type", func(fval string) error {
			passList, err := filterNetworkByType(displayNet, fval)
			if err != nil {
				return err
			}
			typeNet = append(typeNet, passList...)
			return nil
		})
		if errFilter != nil {
			return nil, errFilter
		}
		displayNet = typeNet
	}

	return displayNet, nil
}
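
A caller assembles the filter with the constructors from the filters package before handing it to filterNetworks. The helper below is a hypothetical sketch, assuming it lives in the same package and uses the same vintage of github.com/docker/docker/api/types/filters as the code above:

// listBridgeAppNetworks is a hypothetical helper showing how the filter
// consumed by filterNetworks would typically be built by a caller.
func listBridgeAppNetworks(nws []types.NetworkResource) ([]types.NetworkResource, error) {
	args := filters.NewArgs()
	args.Add("driver", "bridge") // checked with ExactMatch("driver", ...)
	args.Add("name", "app")      // checked with Match("name", ...), a regexp-style match
	return filterNetworks(nws, args)
}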
Example #2
// getUntilFromPruneFilters parses the "until" filter value into a time.Time
// cutoff; the zero time is returned when no "until" filter is set.
func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) {
	until := time.Time{}
	if !pruneFilters.Include("until") {
		return until, nil
	}
	untilFilters := pruneFilters.Get("until")
	if len(untilFilters) > 1 {
		return until, fmt.Errorf("more than one until filter specified")
	}
	ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now())
	if err != nil {
		return until, err
	}
	seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0)
	if err != nil {
		return until, err
	}
	until = time.Unix(seconds, nanoseconds)
	return until, nil
}
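
The "until" value accepts the same formats as other Docker timestamp filters: an RFC 3339 date, a Unix timestamp, or a Go duration relative to now. A minimal, self-contained sketch of the same GetTimestamp/ParseTimestamps round trip, assuming the api/types/filters and api/types/time import paths used above:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/filters"
	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	// "until=24h" selects objects created more than 24 hours ago.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("until", "24h")

	// Same two-step conversion as getUntilFromPruneFilters.
	ts, err := timetypes.GetTimestamp(pruneFilters.Get("until")[0], time.Now())
	if err != nil {
		panic(err)
	}
	seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("cutoff:", time.Unix(seconds, nanoseconds))
}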
Example #3
// filterVolumes filters the volume list according to the user-specified filter
// and returns the matching volumes.
func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) {
	// if filter is empty, return original volume list
	if filter.Len() == 0 {
		return vols, nil
	}

	var retVols []volume.Volume
	for _, vol := range vols {
		if filter.Include("name") {
			if !filter.Match("name", vol.Name()) {
				continue
			}
		}
		if filter.Include("driver") {
			if !filter.Match("driver", vol.DriverName()) {
				continue
			}
		}
		if filter.Include("label") {
			v, ok := vol.(volume.LabeledVolume)
			if !ok {
				continue
			}
			if !filter.MatchKVList("label", v.Labels()) {
				continue
			}
		}
		retVols = append(retVols, vol)
	}
	danglingOnly := false
	if filter.Include("dangling") {
		if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") {
			danglingOnly = true
		} else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling"))
		}
		retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly)
	}
	return retVols, nil
}
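
The "dangling" value is parsed loosely: "true" or "1" keeps only unused volumes, "false" or "0" explicitly keeps used ones as well, and anything else is rejected. A hypothetical helper in the same package might build the filter like this:

// danglingVolumes is a hypothetical helper: keep only volumes not referenced
// by any container.
func (daemon *Daemon) danglingVolumes(vols []volume.Volume) ([]volume.Volume, error) {
	args := filters.NewArgs()
	args.Add("dangling", "true") // "1" is accepted too; any other value is an error
	return daemon.filterVolumes(vols, args)
}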
Example #4
// ImagesPrune removes unused images
func (daemon *Daemon) ImagesPrune(pruneFilters filters.Args) (*types.ImagesPruneReport, error) {
	rep := &types.ImagesPruneReport{}

	danglingOnly := true
	if pruneFilters.Include("dangling") {
		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
			danglingOnly = false
		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", pruneFilters.Get("dangling"))
		}
	}

	var allImages map[image.ID]*image.Image
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}
	allContainers := daemon.List()
	imageRefs := map[string]bool{}
	for _, c := range allContainers {
		// key by the image digest hex so the imageRefs[hex] lookup below matches
		imageRefs[digest.Digest(c.ImageID).Hex()] = true
	}

	// Filter out intermediary images (untagged images that still have children);
	// allLayers is kept for the reclaimed-space computation at the end
	allLayers := daemon.layerStore.Map()
	topImages := map[image.ID]*image.Image{}
	for id, img := range allImages {
		dgst := digest.Digest(id)
		if len(daemon.referenceStore.References(dgst)) == 0 && len(daemon.imageStore.Children(id)) != 0 {
			continue
		}
		topImages[id] = img
	}

	for id := range topImages {
		dgst := digest.Digest(id)
		hex := dgst.Hex()
		if _, ok := imageRefs[hex]; ok {
			continue
		}

		deletedImages := []types.ImageDelete{}
		refs := daemon.referenceStore.References(dgst)
		if len(refs) > 0 {
			if danglingOnly {
				// Not a dangling image
				continue
			}

			nrRefs := len(refs)
			for _, ref := range refs {
				// If nrRefs == 1, we have an image marked as myreponame:<none>
				// i.e. the tag content was changed
				if _, ok := ref.(reference.Canonical); ok && nrRefs > 1 {
					continue
				}
				imgDel, err := daemon.ImageDelete(ref.String(), false, true)
				if err != nil {
					logrus.Warnf("could not delete reference %s: %v", ref.String(), err)
					continue
				}
				deletedImages = append(deletedImages, imgDel...)
			}
		} else {
			imgDel, err := daemon.ImageDelete(hex, false, true)
			if err != nil {
				logrus.Warnf("could not delete image %s: %v", hex, err)
				continue
			}
			deletedImages = append(deletedImages, imgDel...)
		}

		rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
	}

	// Compute how much space was freed
	for _, d := range rep.ImagesDeleted {
		if d.Deleted != "" {
			chid := layer.ChainID(d.Deleted)
			if l, ok := allLayers[chid]; ok {
				diffSize, err := l.DiffSize()
				if err != nil {
					logrus.Warnf("failed to get layer %s size: %v", chid, err)
					continue
				}
				rep.SpaceReclaimed += uint64(diffSize)
			}
		}
	}

	return rep, nil
}
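
Note that the default is the opposite of the volume case above: with no "dangling" filter at all, danglingOnly stays true and only untagged images are pruned; passing dangling=false widens the prune to every unused image. A hypothetical caller:

// pruneAllUnusedImages is a hypothetical wrapper that prunes tagged as well
// as untagged unused images.
func pruneAllUnusedImages(daemon *Daemon) (*types.ImagesPruneReport, error) {
	args := filters.NewArgs()
	args.Add("dangling", "false") // without this, only dangling (untagged) images are pruned
	return daemon.ImagesPrune(args)
}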
Example #5
// Images returns a filtered list of images. imageFilters is the set of
// filter arguments (interpreted by api/types/filters) applied to the list.
// all controls whether all images in the graph are listed, or only
// top-level (non-intermediate) images. withExtraAttrs additionally computes
// container counts and shared-size information for each image.
func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
	var (
		allImages    map[image.ID]*image.Image
		err          error
		danglingOnly = false
	)

	if err := imageFilters.Validate(acceptedImageFilterTags); err != nil {
		return nil, err
	}

	if imageFilters.Include("dangling") {
		if imageFilters.ExactMatch("dangling", "true") {
			danglingOnly = true
		} else if !imageFilters.ExactMatch("dangling", "false") {
			return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling"))
		}
	}
	if danglingOnly {
		allImages = daemon.imageStore.Heads()
	} else {
		allImages = daemon.imageStore.Map()
	}

	var beforeFilter, sinceFilter *image.Image
	err = imageFilters.WalkValues("before", func(value string) error {
		beforeFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	err = imageFilters.WalkValues("since", func(value string) error {
		sinceFilter, err = daemon.GetImage(value)
		return err
	})
	if err != nil {
		return nil, err
	}

	images := []*types.ImageSummary{}
	var imagesMap map[*image.Image]*types.ImageSummary
	var layerRefs map[layer.ChainID]int
	var allLayers map[layer.ChainID]layer.Layer
	var allContainers []*container.Container

	for id, img := range allImages {
		if beforeFilter != nil {
			if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) {
				continue
			}
		}

		if sinceFilter != nil {
			if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) {
				continue
			}
		}

		if imageFilters.Include("label") {
			// Very old images that do not have image.Config (or even labels)
			if img.Config == nil {
				continue
			}
			// We are now sure image.Config is not nil
			if !imageFilters.MatchKVList("label", img.Config.Labels) {
				continue
			}
		}

		layerID := img.RootFS.ChainID()
		var size int64
		if layerID != "" {
			l, err := daemon.layerStore.Get(layerID)
			if err != nil {
				return nil, err
			}

			size, err = l.Size()
			layer.ReleaseAndLog(daemon.layerStore, l)
			if err != nil {
				return nil, err
			}
		}

		newImage := newImage(img, size)

		for _, ref := range daemon.referenceStore.References(id.Digest()) {
			if imageFilters.Include("reference") {
				var found bool
				var matchErr error
				for _, pattern := range imageFilters.Get("reference") {
					found, matchErr = reference.Match(pattern, ref)
					if matchErr != nil {
						return nil, matchErr
					}
					if found {
						// one matching pattern is enough
						break
					}
				}
				if !found {
					continue
				}
			}
			if _, ok := ref.(reference.Canonical); ok {
				newImage.RepoDigests = append(newImage.RepoDigests, ref.String())
			}
			if _, ok := ref.(reference.NamedTagged); ok {
				newImage.RepoTags = append(newImage.RepoTags, ref.String())
			}
		}
		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
			if all || len(daemon.imageStore.Children(id)) == 0 {

				if imageFilters.Include("dangling") && !danglingOnly {
					// dangling=false case, so dangling images are not wanted
					continue
				}
				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
					continue
				}
				newImage.RepoDigests = []string{"<none>@<none>"}
				newImage.RepoTags = []string{"<none>:<none>"}
			} else {
				continue
			}
		} else if danglingOnly && len(newImage.RepoTags) > 0 {
			continue
		}

		if withExtraAttrs {
			// lazily init variables
			if imagesMap == nil {
				allContainers = daemon.List()
				allLayers = daemon.layerStore.Map()
				imagesMap = make(map[*image.Image]*types.ImageSummary)
				layerRefs = make(map[layer.ChainID]int)
			}

			// Get container count
			newImage.Containers = 0
			for _, c := range allContainers {
				if c.ImageID == id {
					newImage.Containers++
				}
			}

			// count layer references
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()
				layerRefs[chid]++
				if _, ok := allLayers[chid]; !ok {
					return nil, fmt.Errorf("layer %v was not found (corruption?)", chid)
				}
			}
			imagesMap[img] = newImage
		}

		images = append(images, newImage)
	}

	if withExtraAttrs {
		// Get Shared and Unique sizes
		for img, newImage := range imagesMap {
			rootFS := *img.RootFS
			rootFS.DiffIDs = nil

			newImage.SharedSize = 0
			for _, id := range img.RootFS.DiffIDs {
				rootFS.Append(id)
				chid := rootFS.ChainID()

				diffSize, err := allLayers[chid].DiffSize()
				if err != nil {
					return nil, err
				}

				if layerRefs[chid] > 1 {
					newImage.SharedSize += diffSize
				}
			}
		}
	}

	sort.Sort(sort.Reverse(byCreated(images)))

	return images, nil
}
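
Several filter keys combine in this listing: "before" and "since" are resolved to reference images via daemon.GetImage, "label" is matched against the image config labels, "reference" is matched as a pattern against repository references, and "dangling" switches between the store heads and the full image map. A hypothetical caller assembling such a filter:

// listRecentLabeledImages is a hypothetical helper: images created after
// "base:latest" that carry a maintainer label, including intermediate images.
func listRecentLabeledImages(daemon *Daemon) ([]*types.ImageSummary, error) {
	args := filters.NewArgs()
	args.Add("since", "base:latest")        // resolved through daemon.GetImage
	args.Add("label", "maintainer=example") // matched with MatchKVList against img.Config.Labels
	return daemon.Images(args, true, false) // all=true, withExtraAttrs=false
}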