func (m customImageMap) Set(value string) error { parts := strings.Split(value, ",") if len(parts) != 2 { return errors.New("invalid custom image format, expected \"name,reference\"") } ref, err := reference.Parse(parts[0]) if err != nil { return err } namedTagged, ok := ref.(reference.NamedTagged) if !ok { return fmt.Errorf("reference %s must contain name and tag", ref.String()) } source, err := reference.ParseNamed(parts[1]) if err != nil { return err } m[parts[0]] = CustomImage{ Source: source.String(), Target: namedTagged, } return nil }
func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer { return time.AfterFunc(ttl, func() { ttles.Lock() defer ttles.Unlock() var f expiryFunc switch entry.EntryType { case entryTypeBlob: f = ttles.onBlobExpire case entryTypeManifest: f = ttles.onManifestExpire default: f = func(reference.Reference) error { return fmt.Errorf("scheduler entry type") } } ref, err := reference.Parse(entry.Key) if err == nil { if err := f(ref); err != nil { context.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err) } } else { context.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err) } delete(ttles.entries, entry.Key) ttles.indexDirty = true }) }
// matchImageIDOnly checks that the given image specifier is a digest-only // reference, and that it matches the given image. func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool { // If the image ref is literally equal to the inspected image's ID, // just return true here (this might be the case for Docker 1.9, // where we won't have a digest for the ID) if inspected.ID == image { return true } // Otherwise, we should try actual parsing to be more correct ref, err := dockerref.Parse(image) if err != nil { glog.V(4).Infof("couldn't parse image reference %q: %v", image, err) return false } digest, isDigested := ref.(dockerref.Digested) if !isDigested { glog.V(4).Infof("the image reference %q was not a digest reference", image) return false } id, err := dockerdigest.ParseDigest(inspected.ID) if err != nil { glog.V(4).Infof("couldn't parse image ID reference %q: %v", inspected.ID, err) return false } if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() { return true } glog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID) return false }
func ensureImage(cli DockerClient, image string) (string, error) { ctx := context.Background() info, _, err := cli.ImageInspectWithRaw(ctx, image, false) if err == nil { logrus.Debugf("Image found locally %s", image) return info.ID, nil } if !client.IsErrImageNotFound(err) { logrus.Errorf("Error inspecting image %q: %v", image, err) return "", err } // Image must be tagged reference if it does not exist ref, err := reference.Parse(image) if err != nil { logrus.Errorf("Image is not valid reference %q: %v", image, err) return "", err } tagged, ok := ref.(reference.NamedTagged) if !ok { logrus.Errorf("Tagged reference required %q", image) return "", errors.New("invalid reference, tag needed") } pullStart := time.Now() pullOptions := types.ImagePullOptions{ PrivilegeFunc: registryAuthNotSupported, } resp, err := cli.ImagePull(ctx, tagged.String(), pullOptions) if err != nil { logrus.Errorf("Error pulling image %q: %v", tagged.String(), err) return "", err } defer resp.Close() outFd, isTerminalOut := term.GetFdInfo(os.Stdout) if err = jsonmessage.DisplayJSONMessagesStream(resp, os.Stdout, outFd, isTerminalOut, nil); err != nil { logrus.Errorf("Error copying pull output: %v", err) return "", err } // TODO: Get pulled digest logFields := logrus.Fields{ timerKey: time.Since(pullStart), "image": tagged.String(), } logrus.WithFields(logFields).Info("image pulled") info, _, err = cli.ImageInspectWithRaw(ctx, tagged.String(), false) if err != nil { return "", err } return info.ID, nil }
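// A hedged usage sketch for ensureImage above, not taken from the original source.
// It assumes the DockerClient interface is satisfied by the engine-api client
// returned from client.NewEnvClient() (imports assumed: github.com/docker/engine-api/client
// and logrus); ensureAlpineExample and the image name are hypothetical examples.
func ensureAlpineExample() {
	cli, err := client.NewEnvClient()
	if err != nil {
		logrus.Fatalf("could not create docker client: %v", err)
	}
	// ensureImage inspects the image first and pulls it only when missing.
	id, err := ensureImage(cli, "docker.io/library/alpine:3.4")
	if err != nil {
		logrus.Fatalf("could not ensure image: %v", err)
	}
	logrus.Infof("image docker.io/library/alpine:3.4 present with ID %s", id)
}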
func testRefs(t *testing.T) (reference.Reference, reference.Reference, reference.Reference) { ref1, err := reference.Parse("testrepo@sha256:aaaaeaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") if err != nil { t.Fatalf("could not parse reference: %v", err) } ref2, err := reference.Parse("testrepo@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb") if err != nil { t.Fatalf("could not parse reference: %v", err) } ref3, err := reference.Parse("testrepo@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc") if err != nil { t.Fatalf("could not parse reference: %v", err) } return ref1, ref2, ref3 }
// ParseImageName parses a docker image string into two parts: repo and tag. // If tag is empty, return the defaultImageTag. func ParseImageName(image string) (string, string) { ref, _ := reference.Parse(image) switch ref := ref.(type) { default: // covers parse failures (ref is nil) and non-named references return image, defaultImageTag case reference.NamedTagged: return ref.Name(), ref.Tag() case reference.Named: return ref.Name(), defaultImageTag } }
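// A minimal test sketch for ParseImageName above, not from the original source.
// It assumes the package-level defaultImageTag is "latest", that "testing" is
// imported, and that reference.Parse is the pre-normalization distribution
// package, which keeps the registry host as part of the name.
func TestParseImageNameSketch(t *testing.T) {
	cases := []struct{ image, repo, tag string }{
		{"busybox", "busybox", "latest"},          // bare name falls back to the default tag
		{"busybox:1.27", "busybox", "1.27"},       // name:tag splits on the tag
		{"quay.io/ns/app:v2", "quay.io/ns/app", "v2"}, // host stays part of the repo name
	}
	for _, c := range cases {
		repo, tag := ParseImageName(c.image)
		if repo != c.repo || tag != c.tag {
			t.Errorf("ParseImageName(%q) = (%q, %q), want (%q, %q)", c.image, repo, tag, c.repo, c.tag)
		}
	}
}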
func getNamedTagged(image string) (reference.NamedTagged, error) { ref, err := reference.Parse(image) if err != nil { return nil, err } named, ok := ref.(reference.NamedTagged) if !ok { return nil, fmt.Errorf("Image reference must have name and tag: %s", image) } return named, nil }
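// A hedged usage sketch for getNamedTagged above, not from the original source;
// printNamedTaggedExample is a hypothetical helper, the image string is an
// arbitrary example, and "fmt" is assumed to be imported.
func printNamedTaggedExample() error {
	named, err := getNamedTagged("docker.io/library/redis:3.2")
	if err != nil {
		return err
	}
	// With the pre-normalization reference package this prints
	// "docker.io/library/redis 3.2".
	fmt.Println(named.Name(), named.Tag())
	return nil
}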
func assertTagged(image string) reference.NamedTagged { ref, err := reference.Parse(image) if err != nil { logrus.Panicf("Invalid reference %q: %v", image, err) } named, ok := ref.(reference.NamedTagged) if !ok { logrus.Panicf("Image reference must have name and tag: %s", image) } return named }
func newSuiteConfiguration(path string, config suiteConfiguration) (*configurationSuite, error) { customImages := make([]CustomImage, 0, len(config.CustomImages)) for _, value := range config.CustomImages { ref, err := reference.Parse(value.Tag) if err != nil { return nil, err } target, ok := ref.(reference.NamedTagged) if !ok { return nil, fmt.Errorf("expecting name:tag for image target, got %s", value.Tag) } customImages = append(customImages, CustomImage{ Source: value.Default, Target: target, }) } images := make([]reference.NamedTagged, 0, len(config.Images)) for _, image := range config.Images { named, err := getNamedTagged(image) if err != nil { return nil, err } images = append(images, named) } var base reference.NamedTagged if config.Base != "" { var err error base, err = getNamedTagged(config.Base) if err != nil { return nil, err } } name := config.Name if name == "" { name = filepath.Base(path) } return &configurationSuite{ config: config, path: path, base: base, customImages: customImages, images: images, resolvedName: name, }, nil }
func populateTestStorage( t *testing.T, driver driver.StorageDriver, setManagedByOpenShift bool, schemaVersion int, repoImages map[string]int, testImages map[string][]*imageapi.Image, ) (map[string][]*imageapi.Image, error) { ctx := context.Background() reg, err := storage.NewRegistry(ctx, driver) if err != nil { t.Fatalf("error creating registry: %v", err) } result := make(map[string][]*imageapi.Image) for key, value := range testImages { images := make([]*imageapi.Image, len(value)) copy(images, value) result[key] = images } for imageReference := range repoImages { parsed, err := reference.Parse(imageReference) if err != nil { t.Fatalf("failed to parse reference %q: %v", imageReference, err) } namedTagged, ok := parsed.(reference.NamedTagged) if !ok { t.Fatalf("expected NamedTagged reference, not %T", parsed) } imageCount := repoImages[imageReference] for i := 0; i < imageCount; i++ { img, err := storeTestImage(ctx, reg, namedTagged, schemaVersion, setManagedByOpenShift) if err != nil { t.Fatal(err) } arr := result[imageReference] t.Logf("created image %s@%s with layers:", namedTagged.Name(), img.Name) for _, l := range img.DockerImageLayers { t.Logf(" %s of size %d", l.Name, l.LayerSize) } result[imageReference] = append(arr, img) } } return result, nil }
func mustImage(source, target, version string) CustomImage { ref, err := reference.Parse(target) if err != nil { panic(err) } namedTagged, ok := ref.(reference.NamedTagged) if !ok { panic("must provide a named tagged reference for image target") } return CustomImage{ Source: source, Target: namedTagged, Version: version, } }
func tagImage(ctx context.Context, cli DockerClient, img, tag string) error { ref, err := reference.Parse(tag) if err != nil { return fmt.Errorf("invalid tag %s: %v", tag, err) } namedTagged, ok := ref.(reference.NamedTagged) if !ok { return fmt.Errorf("expecting named tagged reference: %s", tag) } tagOptions := types.ImageTagOptions{ Force: true, } if err := cli.ImageTag(ctx, img, namedTagged.String(), tagOptions); err != nil { return fmt.Errorf("error tagging image %s as %s: %v", img, tag, err) } return nil }
func tagImage(client *dockerclient.Client, img, tag string) error { ref, err := reference.Parse(tag) if err != nil { return fmt.Errorf("invalid tag %s: %v", tag, err) } namedTagged, ok := ref.(reference.NamedTagged) if !ok { return fmt.Errorf("expecting named tagged reference: %s", tag) } tagOptions := dockerclient.TagImageOptions{ Repo: namedTagged.Name(), Tag: namedTagged.Tag(), Force: true, } if err := client.TagImage(img, tagOptions); err != nil { return fmt.Errorf("error tagging image %s as %s: %v", img, tag, err) } return nil }
func ensureImage(client DockerClient, image string) (string, error) { info, err := client.InspectImage(image) if err == nil { logrus.Debugf("Image found locally %s", image) return info.ID, nil } if err != dockerclient.ErrNoSuchImage { logrus.Errorf("Error inspecting image %q: %v", image, err) return "", err } // Image must be tagged reference if it does not exist ref, err := reference.Parse(image) if err != nil { logrus.Debugf("Image is not valid reference %q: %v", image, err) return "", err } tagged, ok := ref.(reference.NamedTagged) if !ok { logrus.Debugf("Tagged reference required %q", image) return "", errors.New("invalid reference, tag needed") } logrus.Infof("Pulling image %s", tagged.String()) pullOptions := dockerclient.PullImageOptions{ Repository: tagged.Name(), Tag: tagged.Tag(), OutputStream: os.Stdout, } if err := client.PullImage(pullOptions, dockerclient.AuthConfiguration{}); err != nil { logrus.Errorf("Error pulling image %q: %v", tagged.String(), err) return "", err } // TODO: Get pulled digest and inspect by digest info, err = client.InspectImage(tagged.String()) if err != nil { return "", err } return info.ID, nil }
func (m customImageMap) Set(value string) error { parts := strings.Split(value, ",") if len(parts) < 2 || len(parts) > 3 { return errors.New("invalid custom image format, expected \"name,reference[,version]\"") } ref, err := reference.Parse(parts[0]) if err != nil { return err } namedTagged, ok := ref.(reference.NamedTagged) if !ok { return fmt.Errorf("reference %s must contain name and tag", ref.String()) } source, err := reference.ParseNamed(parts[1]) if err != nil { return err } var version string if len(parts) == 3 { version = parts[2] } else if refTag, ok := source.(reference.Tagged); ok { version = refTag.Tag() } else { // TODO: In this case is it better to leave it blank and use the default // from the configuration file? version = namedTagged.Tag() } key := fmt.Sprintf("%s,%s", parts[0], parts[1]) m[key] = CustomImage{ Source: source.String(), Target: namedTagged, Version: version, } return nil }
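// A minimal sketch of wiring customImageMap into the standard flag package, not
// from the original source. It assumes the type is declared as
// `type customImageMap map[string]CustomImage` and that a String() string method
// exists elsewhere so the value satisfies flag.Value; "flag" and "fmt" are
// assumed imports, and parseCustomImageFlagsExample is a hypothetical helper.
func parseCustomImageFlagsExample() {
	customImages := customImageMap{}
	flag.Var(customImages, "custom-image", `custom image as "name,reference[,version]" (repeatable)`)
	flag.Parse()
	// Each accepted flag value ends up keyed by "target,source" per Set above.
	for key, img := range customImages {
		fmt.Printf("%s -> source=%s target=%s version=%s\n", key, img.Source, img.Target.String(), img.Version)
	}
}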
// TestValidateManifest verifies the verifyManifest function func TestValidateManifest(t *testing.T) { expectedDigest, err := reference.Parse("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") if err != nil { t.Fatal("could not parse reference") } expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") // Good manifest goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var goodSignedManifest schema1.SignedManifest err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) if err != nil { t.Fatal("error unmarshaling manifest:", err) } verifiedManifest, err := verifyManifest(&goodSignedManifest, expectedDigest) if err != nil { t.Fatal("verifyManifest failed:", err) } if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { t.Fatal("unexpected FSLayer in good manifest") } // "Extra data" manifest extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var extraDataSignedManifest schema1.SignedManifest err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) if err != nil { t.Fatal("error unmarshaling manifest:", err) } verifiedManifest, err = verifyManifest(&extraDataSignedManifest, expectedDigest) if err != nil { t.Fatal("verifyManifest failed:", err) } if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { t.Fatal("unexpected FSLayer in extra data manifest") } // Bad manifest badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") if err != nil { t.Fatal("error reading fixture:", err) } var badSignedManifest schema1.SignedManifest err = json.Unmarshal(badManifestBytes, &badSignedManifest) if err != nil { t.Fatal("error unmarshaling manifest:", err) } verifiedManifest, err = verifyManifest(&badSignedManifest, expectedDigest) if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { t.Fatal("expected verifyManifest to fail with digest error") } }
func TestRepositoryBlobStat(t *testing.T) { quotaEnforcing = &quotaEnforcingConfig{} ctx := context.Background() // this driver holds all the testing blobs in memory during the whole test run driver := inmemory.New() // generate two images and store their blobs in the driver testImages, err := populateTestStorage(t, driver, true, 1, map[string]int{"nm/is:latest": 1, "nm/repo:missing-layer-links": 1}, nil) if err != nil { t.Fatal(err) } // generate an image and store its blobs in the driver; the resulting image will lack the managed by openshift // annotation testImages, err = populateTestStorage(t, driver, false, 1, map[string]int{"nm/unmanaged:missing-layer-links": 1}, testImages) if err != nil { t.Fatal(err) } // remove layer repository links from two of the above images; keep the uploaded blobs in the global // blobstore though for _, name := range []string{"nm/repo:missing-layer-links", "nm/unmanaged:missing-layer-links"} { repoName := strings.Split(name, ":")[0] for _, layer := range testImages[name][0].DockerImageLayers { dgst := digest.Digest(layer.Name) alg, hex := dgst.Algorithm(), dgst.Hex() err := driver.Delete(ctx, fmt.Sprintf("/docker/registry/v2/repositories/%s/_layers/%s/%s", repoName, alg, hex)) if err != nil { t.Fatalf("failed to delete layer link %q from repository %q: %v", layer.Name, repoName, err) } } } // generate random images without storing their blobs in the driver etcdOnlyImages := map[string]*imageapi.Image{} for _, d := range []struct { name string managed bool }{{"nm/is", true}, {"registry.org:5000/user/app", false}} { img, err := registrytest.NewImageForManifest(d.name, registrytest.SampleImageManifestSchema1, d.managed) if err != nil { t.Fatal(err) } etcdOnlyImages[d.name] = img } for _, tc := range []struct { name string stat string images []imageapi.Image imageStreams []imageapi.ImageStream pullthrough bool skipAuth bool deferredErrors deferredErrors expectedDescriptor distribution.Descriptor expectedError error expectedActions []clientAction }{ { name: "local stat", stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, expectedDescriptor: testNewDescriptorForLayer(testImages["nm/is:latest"][0].DockerImageLayers[0]), }, { name: "blob only tagged in image stream", stat: "nm/repo@" + testImages["nm/repo:missing-layer-links"][0].DockerImageLayers[1].Name, images: []imageapi.Image{*testImages["nm/repo:missing-layer-links"][0]}, imageStreams: []imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{ Namespace: "nm", Name: "repo", }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "latest": { Items: []imageapi.TagEvent{ { Image: testImages["nm/repo:missing-layer-links"][0].Name, }, }, }, }, }, }, }, expectedDescriptor: testNewDescriptorForLayer(testImages["nm/repo:missing-layer-links"][0].DockerImageLayers[1]), expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, }, { name: "blob referenced only by not managed image with pullthrough on", stat: "nm/unmanaged@" + testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, images: []imageapi.Image{*testImages["nm/unmanaged:missing-layer-links"][0]}, imageStreams: []imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{ Namespace: "nm", Name: "unmanaged", }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "latest": { Items: []imageapi.TagEvent{ { Image: testImages["nm/unmanaged:missing-layer-links"][0].Name, }, }, }, },
}, }, }, pullthrough: true, expectedDescriptor: testNewDescriptorForLayer(testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1]), expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, }, { // TODO: this should err out because of missing image stream. // Unfortunately, it's not the case. Until we start storing layer links in etcd, we depend on // local layer links. name: "layer link present while image stream not found", stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, images: []imageapi.Image{*testImages["nm/is:latest"][0]}, expectedDescriptor: testNewDescriptorForLayer(testImages["nm/is:latest"][0].DockerImageLayers[0]), }, { name: "blob only tagged by not managed image with pullthrough off", stat: "nm/repo@" + testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, images: []imageapi.Image{*testImages["nm/unmanaged:missing-layer-links"][0]}, imageStreams: []imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{ Namespace: "nm", Name: "repo", }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "latest": { Items: []imageapi.TagEvent{ { Image: testImages["nm/unmanaged:missing-layer-links"][0].DockerImageLayers[1].Name, }, }, }, }, }, }, }, expectedError: distribution.ErrBlobUnknown, expectedActions: []clientAction{{"get", "imagestreams"}, {"get", "images"}}, }, { name: "blob not stored locally but referred in image stream", stat: "nm/is@" + etcdOnlyImages["nm/is"].DockerImageLayers[1].Name, images: []imageapi.Image{*etcdOnlyImages["nm/is"]}, imageStreams: []imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{ Namespace: "nm", Name: "is", }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "latest": { Items: []imageapi.TagEvent{ { Image: etcdOnlyImages["nm/is"].Name, }, }, }, }, }, }, }, expectedError: distribution.ErrBlobUnknown, }, { name: "blob does not exist", stat: "nm/repo@" + etcdOnlyImages["nm/is"].DockerImageLayers[0].Name, images: []imageapi.Image{*testImages["nm/is:latest"][0]}, imageStreams: []imageapi.ImageStream{ { ObjectMeta: kapi.ObjectMeta{ Namespace: "nm", Name: "repo", }, Status: imageapi.ImageStreamStatus{ Tags: map[string]imageapi.TagEventList{ "latest": { Items: []imageapi.TagEvent{ { Image: testImages["nm/is:latest"][0].Name, }, }, }, }, }, }, }, expectedError: distribution.ErrBlobUnknown, }, { name: "auth not performed", stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, skipAuth: true, expectedError: fmt.Errorf("openshift.auth.completed missing from context"), }, { name: "deferred error", stat: "nm/is@" + testImages["nm/is:latest"][0].DockerImageLayers[0].Name, imageStreams: []imageapi.ImageStream{{ObjectMeta: kapi.ObjectMeta{Namespace: "nm", Name: "is"}}}, deferredErrors: deferredErrors{"nm/is": ErrOpenShiftAccessDenied}, expectedError: ErrOpenShiftAccessDenied, }, } { ref, err := reference.Parse(tc.stat) if err != nil { t.Errorf("[%s] failed to parse blob reference %q: %v", tc.name, tc.stat, err) continue } canonical, ok := ref.(reference.Canonical) if !ok { t.Errorf("[%s] not a canonical reference %q", tc.name, ref.String()) continue } cachedLayers, err = newDigestToRepositoryCache(defaultDigestToRepositoryCacheSize) if err != nil { t.Fatal(err) } ctx := context.Background() if !tc.skipAuth { ctx = WithAuthPerformed(ctx) } if tc.deferredErrors != nil { ctx = WithDeferredErrors(ctx, tc.deferredErrors) } client 
:= &testclient.Fake{} client.AddReactor("get", "imagestreams", imagetest.GetFakeImageStreamGetHandler(t, tc.imageStreams...)) client.AddReactor("get", "images", registrytest.GetFakeImageGetHandler(t, tc.images...)) reg, err := newTestRegistry(ctx, client, driver, defaultBlobRepositoryCacheTTL, tc.pullthrough, true) if err != nil { t.Errorf("[%s] unexpected error: %v", tc.name, err) continue } repo, err := reg.Repository(ctx, canonical) if err != nil { t.Errorf("[%s] unexpected error: %v", tc.name, err) continue } desc, err := repo.Blobs(ctx).Stat(ctx, canonical.Digest()) if err != nil && tc.expectedError == nil { t.Errorf("[%s] got unexpected stat error: %v", tc.name, err) continue } if err == nil && tc.expectedError != nil { t.Errorf("[%s] got unexpected non-error", tc.name) continue } if !reflect.DeepEqual(err, tc.expectedError) { t.Errorf("[%s] got unexpected error: %s", tc.name, diff.ObjectGoPrintDiff(err, tc.expectedError)) continue } if tc.expectedError == nil && !reflect.DeepEqual(desc, tc.expectedDescriptor) { t.Errorf("[%s] got unexpected descriptor: %s", tc.name, diff.ObjectGoPrintDiff(desc, tc.expectedDescriptor)) } compareActions(t, tc.name, client.Actions(), tc.expectedActions) } }
// Images returns a filtered list of images. filterArgs is a JSON-encoded set // of filter arguments which will be interpreted by pkg/parsers/filters. // filter is a shell glob string applied to repository names. The argument // named all controls whether all images in the graph are filtered, or just // the heads. func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { var ( allImages map[image.ID]*image.Image err error danglingOnly = false ) imageFilters, err := filters.FromParam(filterArgs) if err != nil { return nil, err } for name := range imageFilters { if _, ok := acceptedImageFilterTags[name]; !ok { return nil, fmt.Errorf("Invalid filter '%s'", name) } } if i, ok := imageFilters["dangling"]; ok { for _, value := range i { if v := strings.ToLower(value); v == "true" { danglingOnly = true } else if v != "false" { return nil, fmt.Errorf("Invalid filter 'dangling=%s'", v) } } } if danglingOnly { allImages = daemon.imageStore.Heads() } else { allImages = daemon.imageStore.Map() } images := []*types.Image{} var filterTagged bool if filter != "" { filterRef, err := reference.Parse(filter) if err == nil { // parse error means wildcard repo if _, ok := filterRef.(reference.Tagged); ok { filterTagged = true } } } for id, img := range allImages { if _, ok := imageFilters["label"]; ok { if img.Config == nil { // Very old images that do not have image.Config (or even labels) continue } // We are now sure image.Config is not nil if !imageFilters.MatchKVList("label", img.Config.Labels) { continue } } layerID := img.RootFS.ChainID() var size int64 if layerID != "" { l, err := daemon.layerStore.Get(layerID) if err != nil { return nil, err } size, err = l.Size() layer.ReleaseAndLog(daemon.layerStore, l) if err != nil { return nil, err } } newImage := newImage(img, size) for _, ref := range daemon.tagStore.References(id) { if filter != "" { // filter by tag/repo name if filterTagged { // filter by tag, require full ref match if ref.String() != filter { continue } } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact continue } } if _, ok := ref.(reference.Digested); ok { newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) } if _, ok := ref.(reference.Tagged); ok { newImage.RepoTags = append(newImage.RepoTags, ref.String()) } } if newImage.RepoDigests == nil && newImage.RepoTags == nil { if all || len(daemon.imageStore.Children(id)) == 0 { if filter != "" { // skip images with no references if filtering by tag continue } newImage.RepoDigests = []string{"<none>@<none>"} newImage.RepoTags = []string{"<none>:<none>"} } else { continue } } else if danglingOnly { continue } images = append(images, newImage) } sort.Sort(sort.Reverse(byCreated(images))) return images, nil }
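// A standalone sketch of the repository filter interpretation used in Images
// above, not part of the daemon code: if the filter parses as a tagged
// reference the match must be exact, otherwise the filter is treated as a
// shell glob over the repository name. refMatchesFilter is a hypothetical
// helper; it assumes the "path" package and the distribution "reference"
// package are imported.
func refMatchesFilter(ref reference.Named, filter string) bool {
	if filterRef, err := reference.Parse(filter); err == nil {
		if _, ok := filterRef.(reference.Tagged); ok {
			// Tagged filter: require the full reference string to match.
			return ref.String() == filter
		}
	}
	// Untagged (or unparseable) filter: glob-match the repository name only.
	matched, err := path.Match(filter, ref.Name())
	return err == nil && matched
}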
// NewBuilder creates a new builder. func NewBuilder(daemonURL string, tlsConfig *tls.Config, contextDirectory, dockerfilePath, repoTag string) (*Builder, error) { // Validate that the context directory exists. stat, err := os.Stat(contextDirectory) if err != nil { return nil, fmt.Errorf("unable to access build context directory: %s", err) } if !stat.IsDir() { return nil, fmt.Errorf("context must be a directory") } if dockerfilePath == "" { // Use Default path. dockerfilePath = filepath.Join(contextDirectory, "Dockerfile") } if _, err := os.Stat(dockerfilePath); err != nil { return nil, fmt.Errorf("unable to access build file: %s", err) } ref, err := reference.Parse(repoTag) if err != nil { if err != reference.ErrNameEmpty { return nil, fmt.Errorf("invalid tag: %s", err) } } client, err := dockerclient.NewDockerClient(daemonURL, tlsConfig) if err != nil { return nil, fmt.Errorf("unable to initialize client: %s", err) } b := &Builder{ daemonURL: daemonURL, tlsConfig: tlsConfig, client: client, contextDirectory: contextDirectory, dockerfilePath: dockerfilePath, ref: ref, out: os.Stdout, config: &config{ Labels: map[string]string{}, ExposedPorts: map[string]struct{}{}, Volumes: map[string]struct{}{}, }, } // Register Dockerfile Directive Handlers b.handlers = map[string]handlerFunc{ commands.Cmd: b.handleCmd, commands.Copy: b.handleCopy, commands.Entrypoint: b.handleEntrypoint, commands.Env: b.handleEnv, commands.Expose: b.handleExpose, commands.Extract: b.handleExtract, commands.From: b.handleFrom, commands.Label: b.handleLabel, commands.Maintainer: b.handleMaintainer, commands.Run: b.handleRun, commands.User: b.handleUser, commands.Volume: b.handleVolume, commands.Workdir: b.handleWorkdir, // Not implemented for now: commands.Add: b.handleAdd, commands.Onbuild: b.handleOnbuild, } if err := b.loadCache(); err != nil { return nil, fmt.Errorf("unable to load build cache: %s", err) } return b, nil }