Example #1
1
// GetImageID returns an image ID corresponding to the image referred to by
// refOrID.
func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
	id, ref, err := reference.ParseIDOrReference(refOrID)
	if err != nil {
		return "", err
	}
	if id != "" {
		if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
			return "", ErrImageDoesNotExist{refOrID}
		}
		return image.ID(id), nil
	}

	if id, err := daemon.referenceStore.Get(ref); err == nil {
		return id, nil
	}
	if tagged, ok := ref.(reference.NamedTagged); ok {
		if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
			for _, namedRef := range daemon.referenceStore.References(id) {
				if namedRef.Name() == ref.Name() {
					return id, nil
				}
			}
		}
	}

	// Search based on ID
	if id, err := daemon.imageStore.Search(refOrID); err == nil {
		return id, nil
	}

	return "", ErrImageDoesNotExist{refOrID}
}
Example #2
0
// GetImageID returns an image ID corresponding to the image referred to by
// refOrID.
func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) {
	// Treat as an ID
	if id, err := digest.ParseDigest(refOrID); err == nil {
		if _, err := daemon.imageStore.Get(image.ID(id)); err != nil {
			return "", ErrImageDoesNotExist{refOrID}
		}
		return image.ID(id), nil
	}

	// Treat it as a possible tag or digest reference
	if ref, err := reference.ParseNamed(refOrID); err == nil {
		if id, err := daemon.referenceStore.Get(ref); err == nil {
			return id, nil
		}
		if tagged, ok := ref.(reference.NamedTagged); ok {
			if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil {
				for _, namedRef := range daemon.referenceStore.References(id) {
					if namedRef.Name() == ref.Name() {
						return id, nil
					}
				}
			}
		}
	}

	// Search based on ID
	if id, err := daemon.imageStore.Search(refOrID); err == nil {
		return id, nil
	}

	return "", ErrImageDoesNotExist{refOrID}
}
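Both variants above resolve `refOrID` in the same order: an exact digest first, then a named tag or digest reference via the reference store, and finally a prefix search in the image store. The sketch below is a hypothetical caller, assuming it lives in the same package as `Daemon`; the helper name and the sample references are illustrative only.
// resolveExamples is a hypothetical illustration of the inputs GetImageID
// accepts: a tag reference, a digest reference, and a short ID prefix all
// resolve to an image.ID (or to ErrImageDoesNotExist).
func resolveExamples(daemon *Daemon) error {
	for _, refOrID := range []string{
		"busybox:latest", // tag reference, resolved via the reference store
		"busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a", // digest reference
		"b3ca410aa2c1", // short ID prefix, resolved via a prefix search in the image store
	} {
		id, err := daemon.GetImageID(refOrID)
		if err != nil {
			return err
		}
		fmt.Printf("%s -> %s\n", refOrID, id)
	}
	return nil
}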
Example #3
0
func (ic *imageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) {
	imgID, err := ic.localImageCache.GetCache(parentID, cfg)
	if err != nil {
		return "", err
	}
	if imgID != "" {
		for _, s := range ic.sources {
			if ic.isParent(s.ID(), image.ID(imgID)) {
				return imgID, nil
			}
		}
	}

	var parent *image.Image
	lenHistory := 0
	if parentID != "" {
		parent, err = ic.daemon.imageStore.Get(image.ID(parentID))
		if err != nil {
			return "", errors.Wrapf(err, "unable to find image %v", parentID)
		}
		lenHistory = len(parent.History)
	}

	for _, target := range ic.sources {
		if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) {
			continue
		}

		if len(target.History)-1 == lenHistory { // last
			if parent != nil {
				if err := ic.daemon.imageStore.SetParent(target.ID(), parent.ID()); err != nil {
					return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID())
				}
			}
			return target.ID().String(), nil
		}

		imgID, err := ic.restoreCachedImage(parent, target, cfg)
		if err != nil {
			return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID())
		}

		ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm
		return imgID.String(), nil
	}

	return "", nil
}
Example #4
0
// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `imgID`
// and whose runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) {
	cache, err := daemon.GetCachedImage(image.ID(imgID), cfg)
	if cache == nil || err != nil {
		return "", err
	}
	return cache.ID().String(), nil
}
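The comment above spells out the contract callers rely on: a genuine error is returned as `err`, while a cache miss comes back as an empty ID with a nil error. Below is a minimal sketch of how a caller might distinguish the three outcomes, assuming the same package as `Daemon`; the helper name `useBuildCache` is hypothetical.
// useBuildCache is a hypothetical caller of GetCachedImageOnBuild showing the
// three possible outcomes: an error aborts, an empty ID signals a cache miss
// (build the layer), and a non-empty ID can be reused as-is.
func useBuildCache(daemon *Daemon, imgID string, cfg *containertypes.Config) (string, bool, error) {
	cachedID, err := daemon.GetCachedImageOnBuild(imgID, cfg)
	if err != nil {
		return "", false, err // real failure
	}
	if cachedID == "" {
		return "", false, nil // cache miss: caller must build a new layer
	}
	return cachedID, true, nil // cache hit: reuse the cached image ID
}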
Example #5
0
// GetCachedImage returns a reference to a cached image whose parent equals `imgID`
// and whose runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
func (d Docker) GetCachedImage(imgID string, cfg *container.Config) (string, error) {
	cache, err := d.Daemon.ImageGetCached(image.ID(imgID), cfg)
	if cache == nil || err != nil {
		return "", err
	}
	return cache.ID().String(), nil
}
Example #6
0
func TestInvalidTags(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "tag-store-test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	store, err := NewTagStore(filepath.Join(tmpDir, "repositories.json"))
	if err != nil {
		t.Fatalf("error creating tag store: %v", err)
	}
	id := image.ID("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6")

	// sha256 as repo name
	ref, err := reference.ParseNamed("sha256:abc")
	if err != nil {
		t.Fatal(err)
	}
	err = store.AddTag(ref, id, true)
	if err == nil {
		t.Fatalf("expected setting tag %q to fail", ref)
	}

	// setting digest as a tag
	ref, err = reference.ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6")
	if err != nil {
		t.Fatal(err)
	}
	err = store.AddTag(ref, id, true)
	if err == nil {
		t.Fatalf("expected setting digest %q to fail", ref)
	}

}
Example #7
0
func TestMigrateRefs(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "migrate-tags")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)

	ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600)

	ta := &mockTagAdder{}
	err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{
		"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"),
		"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"),
		"abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"),
	})
	if err != nil {
		t.Fatal(err)
	}

	expected := map[string]string{
		"busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
		"busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9",
		"registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae",
	}

	if !reflect.DeepEqual(expected, ta.refs) {
		t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs)
	}

	// second migration is no-op
	ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600)
	err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{
		"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"),
	})
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, ta.refs) {
		t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs)
	}
}
Example #8
func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) {
	imgDescr := make(map[image.ID]*imageDescriptor)

	addAssoc := func(id image.ID, ref reference.Named) {
		if _, ok := imgDescr[id]; !ok {
			imgDescr[id] = &imageDescriptor{}
		}

		if ref != nil {
			var tagged reference.NamedTagged
			if _, ok := ref.(reference.Canonical); ok {
				return
			}
			var ok bool
			if tagged, ok = ref.(reference.NamedTagged); !ok {
				var err error
				if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil {
					return
				}
			}

			for _, t := range imgDescr[id].refs {
				if tagged.String() == t.String() {
					return
				}
			}
			imgDescr[id].refs = append(imgDescr[id].refs, tagged)
		}
	}

	for _, name := range names {
		id, ref, err := reference.ParseIDOrReference(name)
		if err != nil {
			return nil, err
		}
		if id != "" {
			_, err := l.is.Get(image.ID(id))
			if err != nil {
				return nil, err
			}
			addAssoc(image.ID(id), nil)
			continue
		}
		if ref.Name() == string(digest.Canonical) {
			imgID, err := l.is.Search(name)
			if err != nil {
				return nil, err
			}
			addAssoc(imgID, nil)
			continue
		}
		if reference.IsNameOnly(ref) {
			assocs := l.rs.ReferencesByName(ref)
			for _, assoc := range assocs {
				addAssoc(assoc.ImageID, assoc.Ref)
			}
			if len(assocs) == 0 {
				imgID, err := l.is.Search(name)
				if err != nil {
					return nil, err
				}
				addAssoc(imgID, nil)
			}
			continue
		}
		var imgID image.ID
		if imgID, err = l.rs.Get(ref); err != nil {
			return nil, err
		}
		addAssoc(imgID, ref)

	}
	return imgDescr, nil
}
Example #9
0
// build runs the Dockerfile builder from a context and a docker object that can be used to make
// calls to Docker.
//
// This will (barring errors):
//
// * read the dockerfile from context
// * parse the dockerfile if not already parsed
// * walk the AST and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * Tag image, if applicable.
// * Print a happy message and return the image ID.
//
func (b *Builder) build(config *types.ImageBuildOptions, context builder.Context, stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) {
	b.options = config
	b.context = context
	b.Stdout = stdout
	b.Stderr = stderr
	b.Output = out

	// If Dockerfile was not parsed yet, extract it from the Context
	if b.dockerfile == nil {
		if err := b.readDockerfile(); err != nil {
			return "", err
		}
	}

	repoAndTags, err := sanitizeRepoAndTags(config.Tags)
	if err != nil {
		return "", err
	}

	var shortImgID string
	for i, n := range b.dockerfile.Children {
		// we only want to add labels to the last layer
		if i == len(b.dockerfile.Children)-1 {
			b.addLabels()
		}
		select {
		case <-b.clientCtx.Done():
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprintf(b.Stdout, "Build cancelled")
			return "", fmt.Errorf("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}
		if err := b.dispatch(i, n); err != nil {
			if b.options.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}

		// Commit the layer when the Dockerfile has only one child,
		// i.e. just the `FROM` instruction plus the build labels;
		// otherwise the new image won't be labeled properly.
		// Commit here so the ID of the final image is reported
		// properly.
		if len(b.dockerfile.Children) == 1 && len(b.options.Labels) > 0 {
			b.commit("", b.runConfig.Cmd, "")
		}

		shortImgID = stringid.TruncateID(b.image)
		fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
		if b.options.Remove {
			b.clearTmp()
		}
	}

	// check if there are any leftover build-args that were passed but not
	// consumed during build. Return an error, if there are any.
	leftoverArgs := []string{}
	for arg := range b.options.BuildArgs {
		if !b.isBuildArgAllowed(arg) {
			leftoverArgs = append(leftoverArgs, arg)
		}
	}
	if len(leftoverArgs) > 0 {
		return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs)
	}

	if b.image == "" {
		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
	}

	imageID := image.ID(b.image)
	for _, rt := range repoAndTags {
		if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
			return "", err
		}
	}

	fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
	return b.image, nil
}
Example #10
0
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	imageID = image.ID(target.Digest)
	if _, err := p.config.ImageStore.Get(imageID); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return imageID, manifestDigest, nil
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
		if err != nil {
			errChan <- err
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.References() {
		layerDescriptor := &v2LayerDescriptor{
			digest:         d.Digest,
			repo:           p.repo,
			blobSumService: p.blobSumService,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		if unmarshalledConfig.RootFS == nil {
			return "", "", errors.New("image config has no rootfs section")
		}
		downloadRootFS = *unmarshalledConfig.RootFS
		downloadRootFS.DiffIDs = []layer.DiffID{}
	} else {
		downloadRootFS = *image.NewRootFS()
	}

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err = p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}
Example #11
0
// build runs the Dockerfile builder from a context and a docker object that can be used to make
// calls to Docker.
//
// This will (barring errors):
//
// * read the dockerfile from context
// * parse the dockerfile if not already parsed
// * walk the AST and execute it by dispatching to handlers. If Remove
//   or ForceRemove is set, additional cleanup around containers happens after
//   processing.
// * Tag image, if applicable.
// * Print a happy message and return the image ID.
//
func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) {
	b.Stdout = stdout
	b.Stderr = stderr
	b.Output = out

	// If Dockerfile was not parsed yet, extract it from the Context
	if b.dockerfile == nil {
		if err := b.readDockerfile(); err != nil {
			return "", err
		}
	}

	repoAndTags, err := sanitizeRepoAndTags(b.options.Tags)
	if err != nil {
		return "", err
	}

	if len(b.options.Labels) > 0 {
		line := "LABEL "
		for k, v := range b.options.Labels {
			line += fmt.Sprintf("%q='%s' ", k, v)
		}
		_, node, err := parser.ParseLine(line, &b.directive, false)
		if err != nil {
			return "", err
		}
		b.dockerfile.Children = append(b.dockerfile.Children, node)
	}

	var shortImgID string
	total := len(b.dockerfile.Children)
	for _, n := range b.dockerfile.Children {
		if err := b.checkDispatch(n, false); err != nil {
			return "", err
		}
	}

	for i, n := range b.dockerfile.Children {
		select {
		case <-b.clientCtx.Done():
			logrus.Debug("Builder: build cancelled!")
			fmt.Fprintf(b.Stdout, "Build cancelled")
			return "", fmt.Errorf("Build cancelled")
		default:
			// Not cancelled yet, keep going...
		}

		if err := b.dispatch(i, total, n); err != nil {
			if b.options.ForceRemove {
				b.clearTmp()
			}
			return "", err
		}

		shortImgID = stringid.TruncateID(b.image)
		fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID)
		if b.options.Remove {
			b.clearTmp()
		}
	}

	// check if there are any leftover build-args that were passed but not
	// consumed during build. Return a warning, if there are any.
	leftoverArgs := []string{}
	for arg := range b.options.BuildArgs {
		if !b.isBuildArgAllowed(arg) {
			leftoverArgs = append(leftoverArgs, arg)
		}
	}

	if len(leftoverArgs) > 0 {
		fmt.Fprintf(b.Stderr, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
	}

	if b.image == "" {
		return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?")
	}

	if b.options.Squash {
		var fromID string
		if b.from != nil {
			fromID = b.from.ImageID()
		}
		b.image, err = b.docker.SquashImage(b.image, fromID)
		if err != nil {
			return "", perrors.Wrap(err, "error squashing image")
		}
	}

	imageID := image.ID(b.image)
	for _, rt := range repoAndTags {
		if err := b.docker.TagImageWithReference(imageID, rt); err != nil {
			return "", err
		}
	}

	fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID)
	return b.image, nil
}
Example #12
0
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)
	if err != nil {
		return "", "", err
	}

	target := mfst.Target()
	imageID = image.ID(target.Digest)
	if _, err := p.config.ImageStore.Get(imageID); err == nil {
		// If the image already exists locally, no need to pull
		// anything.
		return imageID, manifestDigest, nil
	}

	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			digest:            d.Digest,
			repo:              p.repo,
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			src:               d,
		}

		descriptors = append(descriptors, layerDescriptor)
	}

	configChan := make(chan []byte, 1)
	errChan := make(chan error, 1)
	var cancel func()
	ctx, cancel = context.WithCancel(ctx)

	// Pull the image config
	go func() {
		configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest)
		if err != nil {
			errChan <- ImageConfigPullError{Err: err}
			cancel()
			return
		}
		configChan <- configJSON
	}()

	var (
		configJSON         []byte       // raw serialized image config
		unmarshalledConfig image.Image  // deserialized image config
		downloadRootFS     image.RootFS // rootFS to use for registering layers.
	)

	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
	// explicitly blocking images intended for linux from the Windows daemon. The
	// config must be received first so that unmarshalledConfig.OS is populated.
	if runtime.GOOS == "windows" {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
		if unmarshalledConfig.OS == "linux" {
			return "", "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
		}
	}

	downloadRootFS = *image.NewRootFS()

	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
	if err != nil {
		if configJSON != nil {
			// Already received the config
			return "", "", err
		}
		select {
		case err = <-errChan:
			return "", "", err
		default:
			cancel()
			select {
			case <-configChan:
			case <-errChan:
			}
			return "", "", err
		}
	}
	defer release()

	if configJSON == nil {
		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
		if err != nil {
			return "", "", err
		}
	}

	// The DiffIDs returned in rootFS MUST match those in the config.
	// Otherwise the image config could be referencing layers that aren't
	// included in the manifest.
	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
		return "", "", errRootFSMismatch
	}

	for i := range rootFS.DiffIDs {
		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
			return "", "", errRootFSMismatch
		}
	}

	imageID, err = p.config.ImageStore.Create(configJSON)
	if err != nil {
		return "", "", err
	}

	return imageID, manifestDigest, nil
}
Example #13
0
func TestMigrateImages(t *testing.T) {
	// TODO Windows: Figure out why this is failing
	if runtime.GOOS == "windows" {
		t.Skip("Failing on Windows")
	}
	if runtime.GOARCH != "amd64" {
		t.Skip("Test tailored to amd64 architecture")
	}
	tmpdir, err := ioutil.TempDir("", "migrate-images")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)

	// busybox from 1.9
	id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "")
	if err != nil {
		t.Fatal(err)
	}

	id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "")
	if err != nil {
		t.Fatal(err)
	}

	ls := &mockRegistrar{}

	ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb"))
	if err != nil {
		t.Fatal(err)
	}

	is, err := image.NewImageStore(ifs, ls)
	if err != nil {
		t.Fatal(err)
	}

	ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution"))
	if err != nil {
		t.Fatal(err)
	}
	mappings := make(map[string]image.ID)

	err = migrateImages(tmpdir, ls, is, ms, mappings)
	if err != nil {
		t.Fatal(err)
	}

	expected := map[string]image.ID{
		id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"),
		id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"),
	}

	if !reflect.DeepEqual(mappings, expected) {
		t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings)
	}

	if actual, expected := ls.count, 2; actual != expected {
		t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
	}
	ls.count = 0

	// next images are busybox from 1.8.2
	_, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
	if err != nil {
		t.Fatal(err)
	}

	_, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")
	if err != nil {
		t.Fatal(err)
	}

	err = migrateImages(tmpdir, ls, is, ms, mappings)
	if err != nil {
		t.Fatal(err)
	}

	expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395")
	expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07")

	if actual, expected := ls.count, 2; actual != expected {
		t.Fatalf("invalid register count: expected %q, got %q", expected, actual)
	}

	v2MetadataService := metadata.NewV2MetadataService(ms)
	receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID())
	if err != nil {
		t.Fatal(err)
	}

	expectedMetadata := []metadata.V2Metadata{
		{Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")},
		{Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
	}

	if !reflect.DeepEqual(expectedMetadata, receivedMetadata) {
		t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata)
	}

}
Example #14
0
// SquashImage creates a new image with the diff of the specified image and the specified parent.
// The new image contains only the layers from its parent plus one extra layer that holds the diff of all the layers in between.
// The existing image(s) are not destroyed.
// If no parent is specified, the new image contains the diff of all the specified image's layers merged into a single layer with no parent.
func (daemon *Daemon) SquashImage(id, parent string) (string, error) {
	img, err := daemon.imageStore.Get(image.ID(id))
	if err != nil {
		return "", err
	}

	var parentImg *image.Image
	var parentChainID layer.ChainID
	if len(parent) != 0 {
		parentImg, err = daemon.imageStore.Get(image.ID(parent))
		if err != nil {
			return "", errors.Wrap(err, "error getting specified parent layer")
		}
		parentChainID = parentImg.RootFS.ChainID()
	} else {
		rootFS := image.NewRootFS()
		parentImg = &image.Image{RootFS: rootFS}
	}

	l, err := daemon.layerStore.Get(img.RootFS.ChainID())
	if err != nil {
		return "", errors.Wrap(err, "error getting image layer")
	}
	defer daemon.layerStore.Release(l)

	ts, err := l.TarStreamFrom(parentChainID)
	if err != nil {
		return "", errors.Wrapf(err, "error getting tar stream to parent")
	}
	defer ts.Close()

	newL, err := daemon.layerStore.Register(ts, parentChainID)
	if err != nil {
		return "", errors.Wrap(err, "error registering layer")
	}
	defer daemon.layerStore.Release(newL)

	var newImage image.Image
	newImage = *img
	newImage.RootFS = nil

	var rootFS image.RootFS
	rootFS = *parentImg.RootFS
	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
	newImage.RootFS = &rootFS

	for i, hi := range newImage.History {
		if i >= len(parentImg.History) {
			hi.EmptyLayer = true
		}
		newImage.History[i] = hi
	}

	now := time.Now()
	var historyComment string
	if len(parent) > 0 {
		historyComment = fmt.Sprintf("merge %s to %s", id, parent)
	} else {
		historyComment = fmt.Sprintf("create new from %s", id)
	}

	newImage.History = append(newImage.History, image.History{
		Created: now,
		Comment: historyComment,
	})
	newImage.Created = now

	b, err := json.Marshal(&newImage)
	if err != nil {
		return "", errors.Wrap(err, "error marshalling image config")
	}

	newImgID, err := daemon.imageStore.Create(b)
	if err != nil {
		return "", errors.Wrap(err, "error creating new image after squash")
	}
	return string(newImgID), nil
}
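A short usage sketch for SquashImage follows, assuming a `*Daemon` from the surrounding package; the wrapper name is hypothetical. With an empty `parent` every layer of `id` is flattened into one new layer, while a non-empty `parent` keeps the parent's layers and adds a single layer holding the remaining diff.
// flattenImage is a hypothetical wrapper: it squashes the image `id` on top of
// `parent` (or flattens it completely when parent is "") and returns the ID of
// the newly created image; the original image is left intact.
func flattenImage(daemon *Daemon, id, parent string) (string, error) {
	newID, err := daemon.SquashImage(id, parent)
	if err != nil {
		return "", err
	}
	return newID, nil
}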
Example #15
0
func TestAddDeleteGet(t *testing.T) {
	jsonFile, err := ioutil.TempFile("", "tag-store-test")
	if err != nil {
		t.Fatalf("error creating temp file: %v", err)
	}
	if _, err := jsonFile.Write([]byte(`{}`)); err != nil {
		t.Fatalf("error writing to temp file: %v", err)
	}
	jsonFile.Close()
	defer os.RemoveAll(jsonFile.Name())

	store, err := NewTagStore(jsonFile.Name())
	if err != nil {
		t.Fatalf("error creating tag store: %v", err)
	}

	testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c")
	testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d")
	testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e")

	// Try adding a reference with no tag or digest
	nameOnly, err := reference.WithName("username/repo")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddTag(nameOnly, testImageID1, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	// Add a few references
	ref1, err := reference.ParseNamed("username/repo1:latest")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddTag(ref1, testImageID1, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	ref2, err := reference.ParseNamed("username/repo1:old")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddTag(ref2, testImageID2, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	ref3, err := reference.ParseNamed("username/repo1:alias")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddTag(ref3, testImageID1, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	ref4, err := reference.ParseNamed("username/repo2:latest")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddTag(ref4, testImageID2, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	ref5, err := reference.ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if err = store.AddDigest(ref5.(reference.Canonical), testImageID2, false); err != nil {
		t.Fatalf("error adding to store: %v", err)
	}

	// Attempt to overwrite with force == false
	if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") {
		t.Fatalf("did not get expected error on overwrite attempt - got %v", err)
	}
	// Repeat to overwrite with force == true
	if err = store.AddTag(ref4, testImageID3, true); err != nil {
		t.Fatalf("failed to force tag overwrite: %v", err)
	}

	// Check references so far
	id, err := store.Get(nameOnly)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID1 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
	}

	id, err = store.Get(ref1)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID1 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
	}

	id, err = store.Get(ref2)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID2 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String())
	}

	id, err = store.Get(ref3)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID1 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String())
	}

	id, err = store.Get(ref4)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID3 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String())
	}

	id, err = store.Get(ref5)
	if err != nil {
		t.Fatalf("Get returned error: %v", err)
	}
	if id != testImageID2 {
		t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String())
	}

	// Get should return ErrDoesNotExist for a nonexistent repo
	nonExistRepo, err := reference.ParseNamed("username/nonexistrepo:latest")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Get")
	}

	// Get should return ErrDoesNotExist for a nonexistent tag
	nonExistTag, err := reference.ParseNamed("username/repo1:nonexist")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	if _, err = store.Get(nonExistTag); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Get")
	}

	// Check References
	refs := store.References(testImageID1)
	sort.Sort(LexicalRefs(refs))
	if len(refs) != 3 {
		t.Fatal("unexpected number of references")
	}
	if refs[0].String() != ref3.String() {
		t.Fatalf("unexpected reference: %v", refs[0].String())
	}
	if refs[1].String() != ref1.String() {
		t.Fatalf("unexpected reference: %v", refs[1].String())
	}
	if refs[2].String() != nameOnly.String()+":latest" {
		t.Fatalf("unexpected reference: %v", refs[2].String())
	}

	// Check ReferencesByName
	repoName, err := reference.WithName("username/repo1")
	if err != nil {
		t.Fatalf("could not parse reference: %v", err)
	}
	associations := store.ReferencesByName(repoName)
	sort.Sort(LexicalAssociations(associations))
	if len(associations) != 3 {
		t.Fatal("unexpected number of associations")
	}
	if associations[0].Ref.String() != ref3.String() {
		t.Fatalf("unexpected reference: %v", associations[0].Ref.String())
	}
	if associations[0].ImageID != testImageID1 {
		t.Fatalf("unexpected reference: %v", associations[0].Ref.String())
	}
	if associations[1].Ref.String() != ref1.String() {
		t.Fatalf("unexpected reference: %v", associations[1].Ref.String())
	}
	if associations[1].ImageID != testImageID1 {
		t.Fatalf("unexpected reference: %v", associations[1].Ref.String())
	}
	if associations[2].Ref.String() != ref2.String() {
		t.Fatalf("unexpected reference: %v", associations[2].Ref.String())
	}
	if associations[2].ImageID != testImageID2 {
		t.Fatalf("unexpected reference: %v", associations[2].Ref.String())
	}

	// Delete should return ErrDoesNotExist for a nonexistent repo
	if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Delete")
	}

	// Delete should return ErrDoesNotExist for a nonexistent tag
	if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Delete")
	}

	// Delete a few references
	if deleted, err := store.Delete(ref1); err != nil || deleted != true {
		t.Fatal("Delete failed")
	}
	if _, err := store.Get(ref1); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Get")
	}
	if deleted, err := store.Delete(ref5); err != nil || deleted != true {
		t.Fatal("Delete failed")
	}
	if _, err := store.Get(ref5); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Get")
	}
	if deleted, err := store.Delete(nameOnly); err != nil || deleted != true {
		t.Fatal("Delete failed")
	}
	if _, err := store.Get(nameOnly); err != ErrDoesNotExist {
		t.Fatal("Expected ErrDoesNotExist from Get")
	}
}
Example #16
0
func (lic *localImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) {
	return getImageIDAndError(lic.daemon.getLocalCachedImage(image.ID(imgID), config))
}
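Example #16 delegates to `getImageIDAndError`, which is not shown in this set. Below is a plausible sketch of such a helper, assuming `getLocalCachedImage` returns `(*image.Image, error)` and that a nil image with a nil error stands for a cache miss.
// getImageIDAndError is sketched here on the assumption that a nil image or a
// non-nil error maps to ("", err), and a found image maps to its string ID.
func getImageIDAndError(img *image.Image, err error) (string, error) {
	if img == nil || err != nil {
		return "", err
	}
	return img.ID().String(), nil
}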