// rememberLayersOfImage caches the layer digests of given image under
// cacheName so subsequent blob lookups can be answered from the cache.
func (r *repository) rememberLayersOfImage(image *imageapi.Image, cacheName string) {
	if len(image.DockerImageLayers) == 0 && len(image.DockerImageManifestMediaType) > 0 && len(image.DockerImageConfig) == 0 {
		// image has no layers
		return
	}

	if len(image.DockerImageLayers) > 0 {
		// Cache every layer digest with the repository-scoped TTL.
		for _, layer := range image.DockerImageLayers {
			r.cachedLayers.RememberDigest(digest.Digest(layer.Name), r.blobrepositorycachettl, cacheName)
		}
		// remember reference to manifest config as well for schema 2
		if image.DockerImageManifestMediaType == schema2.MediaTypeManifest && len(image.DockerImageMetadata.ID) > 0 {
			r.cachedLayers.RememberDigest(digest.Digest(image.DockerImageMetadata.ID), r.blobrepositorycachettl, cacheName)
		}
		return
	}

	// No layer list is available; fall back to parsing the manifest itself
	// and remembering the layers it references. Errors are logged, not
	// propagated — this is a best-effort cache warm-up.
	mh, err := NewManifestHandlerFromImage(r, image)
	if err != nil {
		context.GetLogger(r.ctx).Errorf("cannot remember layers of image %q: %v", image.Name, err)
		return
	}
	dgst, err := mh.Digest()
	if err != nil {
		context.GetLogger(r.ctx).Errorf("cannot get manifest digest of image %q: %v", image.Name, err)
		return
	}
	r.rememberLayersOfManifest(dgst, mh.Manifest(), cacheName)
}
func testGetSet(t *testing.T, store StoreBackend) { type tcase struct { input []byte expected digest.Digest } tcases := []tcase{ {[]byte("foobar"), digest.Digest("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, } randomInput := make([]byte, 8*1024) _, err := rand.Read(randomInput) if err != nil { t.Fatal(err) } // skipping use of digest pkg because it is used by the implementation h := sha256.New() _, err = h.Write(randomInput) if err != nil { t.Fatal(err) } tcases = append(tcases, tcase{ input: randomInput, expected: digest.Digest("sha256:" + hex.EncodeToString(h.Sum(nil))), }) for _, tc := range tcases { id, err := store.Set([]byte(tc.input)) if err != nil { t.Fatal(err) } if id != tc.expected { t.Fatalf("expected ID %q, got %q", tc.expected, id) } } for _, emptyData := range [][]byte{nil, {}} { _, err := store.Set(emptyData) if err == nil { t.Fatal("expected error for nil input.") } } for _, tc := range tcases { data, err := store.Get(tc.expected) if err != nil { t.Fatal(err) } if bytes.Compare(data, tc.input) != 0 { t.Fatalf("expected data %q, got %q", tc.input, data) } } for _, key := range []digest.Digest{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { _, err := store.Get(key) if err == nil { t.Fatalf("expected error for ID %q.", key) } } }
func TestDeleteDisabled(t *testing.T) { env := newTestEnv(t, false) imageName := "foo/bar" // "build" our layer file layerFile, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random layer file: %v", err) } layerDigest := digest.Digest(tarSumStr) layerURL, err := env.builder.BuildBlobURL(imageName, layerDigest) if err != nil { t.Fatalf("Error building blob URL") } uploadURLBase, _ := startPushLayer(t, env.builder, imageName) pushLayer(t, env.builder, imageName, layerDigest, uploadURLBase, layerFile) resp, err := httpDelete(layerURL) if err != nil { t.Fatalf("unexpected error deleting layer: %v", err) } checkResponse(t, "deleting layer with delete disabled", resp, http.StatusMethodNotAllowed) }
func uploadRandomTarball(t *testing.T, ctx context.Context, bi distribution.BlobIngester) digest.Digest { randomDataReader, tarSumStr, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("error creating random reader: %v", err) } dgst := digest.Digest(tarSumStr) if err != nil { t.Fatalf("error allocating upload store: %v", err) } randomLayerSize, err := seekerSize(randomDataReader) if err != nil { t.Fatalf("error getting seeker size for random layer: %v", err) } _, err = addBlob(ctx, bi, distribution.Descriptor{ Digest: dgst, MediaType: "application/octet-stream", Size: randomLayerSize, }, randomDataReader) if err != nil { t.Fatalf("failed to add blob: %v", err) } return dgst }
// testNewDescriptorForLayer converts an image layer into a generic blob
// descriptor carrying an octet-stream media type.
func testNewDescriptorForLayer(layer imageapi.ImageLayer) distribution.Descriptor {
	var desc distribution.Descriptor
	desc.Digest = digest.Digest(layer.Name)
	desc.MediaType = "application/octet-stream"
	desc.Size = layer.LayerSize
	return desc
}
func TestFSGetInvalidData(t *testing.T) { tmpdir, err := ioutil.TempDir("", "images-fs-store") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) fs, err := NewFSStoreBackend(tmpdir) if err != nil { t.Fatal(err) } id, err := fs.Set([]byte("foobar")) if err != nil { t.Fatal(err) } dgst := digest.Digest(id) if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { t.Fatal(err) } _, err = fs.Get(id) if err == nil { t.Fatal("expected get to fail after data modification.") } }
// calculateImageSize gets and updates size of each image layer. If manifest v2 is converted to v1,
// then it loses information about layers size. We have to get this information from server again.
func (isi *ImageStreamImporter) calculateImageSize(ctx gocontext.Context, repo distribution.Repository, image *api.Image) error {
	bs := repo.Blobs(ctx)

	layerSet := sets.NewString()
	size := int64(0)

	for i := range image.DockerImageLayers {
		layer := &image.DockerImageLayers[i]
		// Count each distinct layer digest only once toward the total size.
		if layerSet.Has(layer.Name) {
			continue
		}
		layerSet.Insert(layer.Name)

		// Serve repeated lookups from the importer-level cache to avoid
		// extra Stat round trips to the registry.
		if layerSize, ok := isi.digestToLayerSizeCache[layer.Name]; ok {
			size += layerSize
			continue
		}

		desc, err := bs.Stat(ctx, digest.Digest(layer.Name))
		if err != nil {
			return err
		}

		isi.digestToLayerSizeCache[layer.Name] = desc.Size
		layer.LayerSize = desc.Size
		size += desc.Size
	}

	image.DockerImageMetadata.Size = size
	return nil
}
func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { expectedDigest := digest.FromBytes(expected) if digest.Digest(layer.DiffID()) != expectedDigest { t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) } ts, err := layer.TarStream() if err != nil { t.Fatal(err) } defer ts.Close() actual, err := ioutil.ReadAll(ts) if err != nil { t.Fatal(err) } if len(actual) != len(expected) { logByteDiff(t, actual, expected) t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) } actualDigest := digest.FromBytes(actual) if actualDigest != expectedDigest { logByteDiff(t, actual, expected) t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) } }
// TestMakeImageConfig makes sure that MakeImageConfig returns the expected // canonical JSON for a reference Image. func TestMakeImageConfig(t *testing.T) { for _, fixture := range fixtures { v1Compatibility := loadFixtureFile(t, fixture+"/v1compatibility") expectedConfig := loadFixtureFile(t, fixture+"/expected_config") layerID := digest.Digest(loadFixtureFile(t, fixture+"/layer_id")) parentID := digest.Digest(loadFixtureFile(t, fixture+"/parent_id")) json, err := MakeImageConfig(v1Compatibility, layerID, parentID) if err != nil { t.Fatalf("MakeImageConfig on %s returned error: %v", fixture, err) } if !bytes.Equal(json, expectedConfig) { t.Fatalf("did not get expected JSON for %s\nexpected: %s\ngot: %s", fixture, expectedConfig, json) } } }
func checkBlobDescriptorCacheClear(ctx context.Context, t *testing.T, provider cache.BlobDescriptorCacheProvider) { localDigest := digest.Digest("sha384:def111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111") expected := distribution.Descriptor{ Digest: "sha256:def1111111111111111111111111111111111111111111111111111111111111", Size: 10, MediaType: "application/octet-stream"} cache, err := provider.RepositoryScoped("foo/bar") if err != nil { t.Fatalf("unexpected error getting scoped cache: %v", err) } if err := cache.SetDescriptor(ctx, localDigest, expected); err != nil { t.Fatalf("error setting descriptor: %v", err) } desc, err := cache.Stat(ctx, localDigest) if err != nil { t.Fatalf("unexpected error statting fake2:abc: %v", err) } if !reflect.DeepEqual(expected, desc) { t.Fatalf("unexpected descriptor: %#v != %#v", expected, desc) } err = cache.Clear(ctx, localDigest) if err != nil { t.Error(err) } desc, err = cache.Stat(ctx, localDigest) if err == nil { t.Fatalf("expected error statting deleted blob: %v", err) } }
// fromProto converts from a protobuf Resource to a Resource interface.
func fromProto(b *pb.Resource) (Resource, error) {
	base, err := newBaseResource(b.Path[0], os.FileMode(b.Mode), b.Uid, b.Gid)
	if err != nil {
		return nil, err
	}

	// Copy xattr values so the returned resource does not alias the protobuf
	// message's byte slices.
	base.xattrs = make(map[string][]byte, len(b.Xattr))
	for attr, value := range b.Xattr {
		base.xattrs[attr] = append(base.xattrs[attr], value...)
	}

	// Dispatch on the file mode bits to build the concrete resource type.
	switch {
	case base.Mode().IsRegular():
		dgsts := make([]digest.Digest, len(b.Digest))
		for i, dgst := range b.Digest {
			// TODO(stevvooe): Should we be validating at this point?
			dgsts[i] = digest.Digest(dgst)
		}
		return newRegularFile(base, b.Path, int64(b.Size), dgsts...)
	case base.Mode().IsDir():
		return newDirectory(base)
	case base.Mode()&os.ModeSymlink != 0:
		return newSymLink(base, b.Target)
	case base.Mode()&os.ModeNamedPipe != 0:
		return newNamedPipe(base)
	case base.Mode()&os.ModeDevice != 0:
		return newDevice(base, b.Major, b.Minor)
	}

	return nil, fmt.Errorf("unknown resource record (%#v): %s", b, base.Mode())
}
// setupImageWithTag commits a busybox container, pushes the result to the
// private registry under the given tag, removes the local copies, and
// returns the digest parsed from the push output. Failures are reported
// through c; the returned error is always nil.
func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) {
	containerName := "busyboxbydigest"

	dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")

	// tag the image to upload it to the private registry
	repoAndTag := repoName + ":" + tag
	out, _, err := dockerCmdWithError("commit", containerName, repoAndTag)
	c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out))

	// delete the container as we don't need it any more
	err = deleteContainer(containerName)
	c.Assert(err, checker.IsNil)

	// push the image
	out, _, err = dockerCmdWithError("push", repoAndTag)
	c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out))

	// delete our local repo that we previously tagged
	rmiout, _, err := dockerCmdWithError("rmi", repoAndTag)
	c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout))

	// extract the digest that "docker push" printed
	matches := pushDigestRegex.FindStringSubmatch(out)
	c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out))
	pushDigest := matches[1]

	return digest.Digest(pushDigest), nil
}
// setupImageWithTag commits a busybox container, pushes the result to the
// private registry under the given tag, removes the local copies, and
// returns the digest parsed from the push output.
func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) {
	containerName := "busyboxbydigest"

	dockerCmd(c, "run", "-d", "-e", "digest=1", "--name", containerName, "busybox")

	// tag the image to upload it to the private registry
	repoAndTag := utils.ImageReference(repoName, tag)
	if out, _, err := dockerCmdWithError("commit", containerName, repoAndTag); err != nil {
		return "", fmt.Errorf("image tagging failed: %s, %v", out, err)
	}

	// delete the container as we don't need it any more
	if err := deleteContainer(containerName); err != nil {
		return "", err
	}

	// push the image
	out, _, err := dockerCmdWithError("push", repoAndTag)
	if err != nil {
		return "", fmt.Errorf("pushing the image to the private registry has failed: %s, %v", out, err)
	}

	// delete our local repo that we previously tagged
	if rmiout, _, err := dockerCmdWithError("rmi", repoAndTag); err != nil {
		return "", fmt.Errorf("error deleting images prior to real test: %s, %v", rmiout, err)
	}

	// extract the digest that "docker push" printed
	matches := pushDigestRegex.FindStringSubmatch(out)
	if len(matches) != 2 {
		return "", fmt.Errorf("unable to parse digest from push output: %s", out)
	}
	pushDigest := matches[1]

	return digest.Digest(pushDigest), nil
}
func TestInvalidTags(t *testing.T) { tmpDir, err := ioutil.TempDir("", "tag-store-test") defer os.RemoveAll(tmpDir) store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) if err != nil { t.Fatalf("error creating tag store: %v", err) } id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") // sha256 as repo name ref, err := ParseNamed("sha256:abc") if err != nil { t.Fatal(err) } err = store.AddTag(ref, id, true) if err == nil { t.Fatalf("expected setting tag %q to fail", ref) } // setting digest as a tag ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") if err != nil { t.Fatal(err) } err = store.AddTag(ref, id, true) if err == nil { t.Fatalf("expected setting digest %q to fail", ref) } }
// Create adds a new image to the store from its JSON config, validates the
// history against the rootfs layers, links the image to its top layer, and
// registers its ID in the digest set.
func (is *store) Create(config []byte) (ID, error) {
	var img Image
	err := json.Unmarshal(config, &img)
	if err != nil {
		return "", err
	}

	// Must reject any config that references diffIDs from the history
	// which aren't among the rootfs layers.
	rootFSLayers := make(map[layer.DiffID]struct{})
	for _, diffID := range img.RootFS.DiffIDs {
		rootFSLayers[diffID] = struct{}{}
	}

	// Count non-empty history entries; each must map to a rootfs layer.
	layerCounter := 0
	for _, h := range img.History {
		if !h.EmptyLayer {
			layerCounter++
		}
	}
	if layerCounter > len(img.RootFS.DiffIDs) {
		return "", errors.New("too many non-empty layers in History section")
	}

	dgst, err := is.fs.Set(config)
	if err != nil {
		return "", err
	}
	imageID := ID(dgst)

	is.Lock()
	defer is.Unlock()

	// Idempotent: the image may already be registered under this ID.
	if _, exists := is.images[imageID]; exists {
		return imageID, nil
	}

	layerID := img.RootFS.ChainID()

	var l layer.Layer
	if layerID != "" {
		l, err = is.ls.Get(layerID)
		if err != nil {
			return "", err
		}
	}

	imageMeta := &imageMeta{
		layer:    l,
		children: make(map[ID]struct{}),
	}

	is.images[imageID] = imageMeta
	if err := is.digestSet.Add(digest.Digest(imageID)); err != nil {
		// Roll back the in-memory registration on failure.
		delete(is.images, imageID)
		return "", err
	}

	return imageID, nil
}
// Delete removes the image with the given ID from the store, detaching it
// from parent/children bookkeeping and releasing its top layer (whose
// freed metadata is returned).
func (is *store) Delete(id ID) ([]layer.Metadata, error) {
	is.Lock()
	defer is.Unlock()

	imageMeta := is.images[id]
	if imageMeta == nil {
		return nil, fmt.Errorf("unrecognized image ID %s", id.String())
	}
	// Orphan the children: drop their persisted parent pointers.
	for id := range imageMeta.children {
		is.fs.DeleteMetadata(id, "parent")
	}
	// Unlink this image from its parent's children set, if any.
	if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil {
		delete(is.images[parent].children, id)
	}

	// Best effort: log but do not fail if the digest set is inconsistent.
	if err := is.digestSet.Remove(digest.Digest(id)); err != nil {
		logrus.Errorf("error removing %s from digest set: %q", id, err)
	}
	delete(is.images, id)
	is.fs.Delete(id)

	if imageMeta.layer != nil {
		return is.ls.Release(imageMeta.layer)
	}
	return nil, nil
}
func randomDigest() digest.Digest { b := [32]byte{} for i := 0; i < len(b); i++ { b[i] = byte(rand.Intn(256)) } d := hex.EncodeToString(b[:]) return digest.Digest("sha256:" + d) }
// createRepository pushes a single-layer signed schema1 manifest for
// imageName under tag and returns the manifest digest. Pushing by tag
// ensures the tag mapping is created in the registry.
func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest {
	unsignedManifest := &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: imageName,
		Tag:  tag,
		FSLayers: []schema1.FSLayer{
			{
				// placeholder; replaced below with the real layer digest
				BlobSum: "asdf",
			},
		},
		History: []schema1.History{
			{
				V1Compatibility: "",
			},
		},
	}

	// Push a random layer for each manifest entry (one here).
	expectedLayers := make(map[digest.Digest]io.ReadSeeker)

	for i := range unsignedManifest.FSLayers {
		rs, dgstStr, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating random layer %d: %v", i, err)
		}
		dgst := digest.Digest(dgstStr)

		expectedLayers[dgst] = rs
		unsignedManifest.FSLayers[i].BlobSum = dgst

		uploadURLBase, _ := startPushLayer(t, env.builder, imageName)
		pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs)
	}

	signedManifest, err := schema1.Sign(unsignedManifest, env.pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	dgst := digest.FromBytes(signedManifest.Canonical)

	// Create this repository by tag to ensure the tag mapping is made in the registry
	manifestDigestURL, err := env.builder.BuildManifestURL(imageName, tag)
	checkErr(t, err, "building manifest url")

	location, err := env.builder.BuildManifestURL(imageName, dgst.String())
	checkErr(t, err, "building location URL")

	resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest)
	checkResponse(t, "putting signed manifest", resp, http.StatusCreated)
	checkHeaders(t, resp, http.Header{
		"Location":              []string{location},
		"Docker-Content-Digest": []string{dgst.String()},
	})
	return dgst
}
// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails
// if an image configuration specifies a parent that doesn't directly follow
// that (deduplicated) image in the image history.
func TestFixManifestLayersBadParent(t *testing.T) {
	// The first two history entries are duplicates (same id) and declare a
	// parent "ac3025..." that does not match the following entry's id
	// ("ec3025..."), which must trigger the invalid-parent error.
	duplicateLayerManifest := schema1.Manifest{
		FSLayers: []schema1.FSLayer{
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
			{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		},
		History: []schema1.History{
			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
			{V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"},
			{V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"},
		},
	}

	if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") {
		t.Fatalf("expected an invalid parent ID error from fixManifestLayers")
	}
}
func taggedMetadata(key string, dgst string, sourceRepo string) metadata.V2Metadata { meta := metadata.V2Metadata{ Digest: digest.Digest(dgst), SourceRepository: sourceRepo, } meta.HMAC = metadata.ComputeV2MetadataHMAC([]byte(key), &meta) return meta }
// createRepository pushes a two-layer signed schema1 manifest for imageName
// by digest and verifies the registry's response status and headers.
func createRepository(env *testEnv, t *testing.T, imageName string, tag string) {
	unsignedManifest := &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: imageName,
		Tag:  tag,
		FSLayers: []schema1.FSLayer{
			{
				// placeholders; replaced below with real layer digests
				BlobSum: "asdf",
			},
			{
				BlobSum: "qwer",
			},
		},
	}

	// Push 2 random layers
	expectedLayers := make(map[digest.Digest]io.ReadSeeker)

	for i := range unsignedManifest.FSLayers {
		rs, dgstStr, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating random layer %d: %v", i, err)
		}
		dgst := digest.Digest(dgstStr)

		expectedLayers[dgst] = rs
		unsignedManifest.FSLayers[i].BlobSum = dgst

		uploadURLBase, _ := startPushLayer(t, env.builder, imageName)
		pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs)
	}

	signedManifest, err := schema1.Sign(unsignedManifest, env.pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	payload, err := signedManifest.Payload()
	checkErr(t, err, "getting manifest payload")

	dgst, err := digest.FromBytes(payload)
	checkErr(t, err, "digesting manifest")

	manifestDigestURL, err := env.builder.BuildManifestURL(imageName, dgst.String())
	checkErr(t, err, "building manifest url")

	resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest)
	checkResponse(t, "putting signed manifest", resp, http.StatusCreated)
	checkHeaders(t, resp, http.Header{
		"Location":              []string{manifestDigestURL},
		"Docker-Content-Digest": []string{dgst.String()},
	})
}
// TestGetStrongID makes sure that GetConfigJSON returns the expected // hash for a reference Image. func TestGetStrongID(t *testing.T) { for _, fixture := range fixtures { expectedConfig := loadFixtureFile(t, fixture+"/expected_config") expectedComputedID := digest.Digest(loadFixtureFile(t, fixture+"/expected_computed_id")) if id, err := StrongID(expectedConfig); err != nil || id != expectedComputedID { t.Fatalf("did not get expected ID for %s\nexpected: %s\ngot: %s\nerror: %v", fixture, expectedComputedID, id, err) } } }
func digestRef(ref reference.Named, digst string) (reference.Canonical, error) { rn, err := reference.ParseNamed(ref.Name()) if err != nil { return nil, err } d := digest.Digest(digst) return reference.WithDigest(rn, d) }
// saveImage writes one image to the export directory: a v1-compatible
// directory per layer plus an <id>.json config file. It returns descriptors
// for any foreign (externally hosted) layer sources, or nil if there are none.
func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) {
	img, err := s.is.Get(id)
	if err != nil {
		return nil, err
	}

	if len(img.RootFS.DiffIDs) == 0 {
		return nil, fmt.Errorf("empty export - not implemented")
	}

	var parent digest.Digest
	var layers []string
	var foreignSrcs map[layer.DiffID]distribution.Descriptor
	for i := range img.RootFS.DiffIDs {
		// Only the topmost layer carries the full image config.
		v1Img := image.V1Image{}
		if i == len(img.RootFS.DiffIDs)-1 {
			v1Img = img.V1Image
		}
		// The chain ID over the first i+1 diff IDs identifies this layer;
		// the v1 ID additionally chains in the parent.
		rootFS := *img.RootFS
		rootFS.DiffIDs = rootFS.DiffIDs[:i+1]
		v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent)
		if err != nil {
			return nil, err
		}

		v1Img.ID = v1ID.Hex()
		if parent != "" {
			v1Img.Parent = parent.Hex()
		}

		src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created)
		if err != nil {
			return nil, err
		}
		layers = append(layers, v1Img.ID)
		parent = v1ID
		// Lazily allocate the foreign-source map only when actually needed.
		if src.Digest != "" {
			if foreignSrcs == nil {
				foreignSrcs = make(map[layer.DiffID]distribution.Descriptor)
			}
			foreignSrcs[img.RootFS.DiffIDs[i]] = src
		}
	}

	configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json")
	if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil {
		return nil, err
	}
	// Preserve the image creation time on the exported config file.
	if err := system.Chtimes(configFile, img.Created, img.Created); err != nil {
		return nil, err
	}

	s.images[id].layers = layers
	return foreignSrcs, nil
}
// imageHasBlob returns true if the image identified by imageName refers to the given blob. The image is // fetched. If requireManaged is true and the image is not managed (it refers to remote registry), the image // will not be processed. Fetched image will update local cache of blobs -> repositories with (blobDigest, // cacheName) pairs. func imageHasBlob( r *repository, cacheName, imageName, blobDigest string, requireManaged bool, ) bool { context.GetLogger(r.ctx).Debugf("getting image %s", imageName) image, err := r.getImage(digest.Digest(imageName)) if err != nil { if kerrors.IsNotFound(err) { context.GetLogger(r.ctx).Debugf("image %q not found: imageName") } else { context.GetLogger(r.ctx).Errorf("failed to get image: %v", err) } return false } // in case of pullthrough disabled, client won't be able to download a blob belonging to not managed image // (image stored in external registry), thus don't consider them as candidates if managed := image.Annotations[imageapi.ManagedByOpenShiftAnnotation]; requireManaged && managed != "true" { context.GetLogger(r.ctx).Debugf("skipping not managed image") return false } if len(image.DockerImageLayers) == 0 { if len(image.DockerImageManifestMediaType) > 0 { // If the media type is set, we can safely assume that the best effort to fill the image layers // has already been done. There are none. 
return false } err = imageapi.ImageWithMetadata(image) if err != nil { context.GetLogger(r.ctx).Errorf("failed to get metadata for image %s: %v", imageName, err) return false } } for _, layer := range image.DockerImageLayers { if layer.Name == blobDigest { // remember all the layers of matching image r.rememberLayersOfImage(image, cacheName) return true } } // only manifest V2 schema2 has docker image config filled where dockerImage.Metadata.id is its digest if len(image.DockerImageConfig) > 0 && image.DockerImageMetadata.ID == blobDigest { // remember manifest config reference of schema 2 as well r.rememberLayersOfImage(image, cacheName) return true } return false }
func populateRepo(t *testing.T, ctx context.Context, repository distribution.Repository, name, tag string) (digest.Digest, error) { m := schema1.Manifest{ Versioned: manifest.Versioned{ SchemaVersion: 1, }, Name: name, Tag: tag, } for i := 0; i < 2; i++ { wr, err := repository.Blobs(ctx).Create(ctx) if err != nil { t.Fatalf("unexpected error creating test upload: %v", err) } rs, ts, err := testutil.CreateRandomTarFile() if err != nil { t.Fatalf("unexpected error generating test layer file") } dgst := digest.Digest(ts) if _, err := io.Copy(wr, rs); err != nil { t.Fatalf("unexpected error copying to upload: %v", err) } if _, err := wr.Commit(ctx, distribution.Descriptor{Digest: dgst}); err != nil { t.Fatalf("unexpected error finishing upload: %v", err) } } pk, err := libtrust.GenerateECP256PrivateKey() if err != nil { t.Fatalf("unexpected error generating private key: %v", err) } sm, err := schema1.Sign(&m, pk) if err != nil { t.Fatalf("error signing manifest: %v", err) } ms, err := repository.Manifests(ctx) if err != nil { t.Fatalf(err.Error()) } ms.Put(sm) if err != nil { t.Fatalf("unexpected errors putting manifest: %v", err) } pl, err := sm.Payload() if err != nil { t.Fatal(err) } return digest.FromBytes(pl) }
// CreateRandomLayers returns a map of n digests. We don't particularly care // about the order of said digests (since they're all random anyway). func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) { digestMap := map[digest.Digest]io.ReadSeeker{} for i := 0; i < n; i++ { rs, ds, err := CreateRandomTarFile() if err != nil { return nil, fmt.Errorf("unexpected error generating test layer file: %v", err) } dgst := digest.Digest(ds) digestMap[dgst] = rs } return digestMap, nil }
func TestSchema1ToImage(t *testing.T) { m := &schema1.SignedManifest{} if err := json.Unmarshal([]byte(etcdManifest), m); err != nil { t.Fatal(err) } image, err := schema1ToImage(m, digest.Digest("sha256:test")) if err != nil { t.Fatal(err) } if image.DockerImageMetadata.ID != "sha256:test" { t.Errorf("unexpected image: %#v", image.DockerImageMetadata.ID) } }
func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { testRequires(c, DaemonIsLinux) makeImage := func(from string, tag string) string { var ( out string ) out, _ = dockerCmd(c, "run", "-d", from, "true") cleanedContainerID := strings.TrimSpace(out) out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) imageID := strings.TrimSpace(out) return imageID } repoName := "foobar-save-multi-images-test" tagFoo := repoName + ":foo" tagBar := repoName + ":bar" idFoo := makeImage("busybox:latest", tagFoo) idBar := makeImage("busybox:latest", tagBar) deleteImages(repoName) // create the archive out, _, err := runCommandPipelineWithOutput( exec.Command(dockerBinary, "save", repoName, "busybox:latest"), exec.Command("tar", "t")) c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) lines := strings.Split(strings.TrimSpace(out), "\n") var actual []string for _, l := range lines { if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { actual = append(actual, strings.TrimSuffix(l, ".json")) } } // make the list of expected layers out, _ = dockerCmd(c, "inspect", "-f", "{{.Id}}", "busybox:latest") expected := []string{strings.TrimSpace(out), idFoo, idBar} // prefixes are not in tar for i := range expected { expected[i] = digest.Digest(expected[i]).Hex() } sort.Strings(actual) sort.Strings(expected) c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) }
func TestInvalidReferenceComponents(t *testing.T) { if _, err := WithName("-foo"); err == nil { t.Fatal("Expected WithName to detect invalid name") } ref, err := WithName("busybox") if err != nil { t.Fatal(err) } if _, err := WithTag(ref, "-foo"); err == nil { t.Fatal("Expected WithName to detect invalid tag") } if _, err := WithDigest(ref, digest.Digest("foo")); err == nil { t.Fatal("Expected WithName to detect invalid digest") } }