Example #1
// GetDiffID finds a layer DiffID from a blobsum hash.
func (blobserv *BlobSumService) GetDiffID(blobsum digest.Digest) (layer.DiffID, error) {
	diffIDBytes, err := blobserv.store.Get(blobserv.blobSumNamespace(), blobserv.blobSumKey(blobsum))
	if err != nil {
		return layer.DiffID(""), err
	}

	return layer.DiffID(diffIDBytes), nil
}
Example #2
// GetDiffID finds a layer DiffID from a digest.
func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
	if err != nil {
		return layer.DiffID(""), err
	}

	return layer.DiffID(diffIDBytes), nil
}
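Both GetDiffID variants above do the same thing: a namespaced key/value lookup keyed by the blob digest, with the stored bytes reinterpreted as a layer DiffID. The following is a minimal, self-contained sketch of that round trip using a toy in-memory store; the type and namespace names are hypothetical, not the metadata package's API.

package main

import (
	"errors"
	"fmt"
)

// memStore is a toy namespaced key/value store standing in for the
// on-disk metadata store the services above read from.
type memStore struct {
	data map[string]map[string][]byte
}

func (s *memStore) Set(namespace, key string, value []byte) {
	if s.data == nil {
		s.data = map[string]map[string][]byte{}
	}
	if s.data[namespace] == nil {
		s.data[namespace] = map[string][]byte{}
	}
	s.data[namespace][key] = value
}

func (s *memStore) Get(namespace, key string) ([]byte, error) {
	value, ok := s.data[namespace][key]
	if !ok {
		return nil, errors.New("not found")
	}
	return value, nil
}

func main() {
	store := &memStore{}
	// Record that a registry blob digest maps to a layer DiffID, then read it
	// back, which is the same round trip GetDiffID performs against its store.
	store.Set("v2metadata-by-digest",
		"sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937",
		[]byte("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"))

	diffIDBytes, err := store.Get("v2metadata-by-digest",
		"sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")
	if err != nil {
		panic(err)
	}
	fmt.Println("diff ID:", string(diffIDBytes))
}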
Example #3
// Get finds a layer by its V1 ID.
func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) {
	if err := v1.ValidateID(v1ID); err != nil {
		return layer.DiffID(""), err
	}

	idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID)
	if err != nil {
		return layer.DiffID(""), err
	}
	return layer.DiffID(idBytes), nil
}
Example #4
func (c *checksums) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) {
	defer func() {
		if err != nil {
			logrus.Debugf("could not get checksum for %q with tar-split: %q. Attempting fallback.", id, err)
			diffID, size, err = c.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath)
		}
	}()

	if oldTarDataPath == "" {
		err = errors.New("no tar-split file")
		return
	}

	tarDataFile, err := os.Open(oldTarDataPath)
	if err != nil {
		return
	}
	defer tarDataFile.Close()
	uncompressed, err := gzip.NewReader(tarDataFile)
	if err != nil {
		return
	}

	dgst := digest.Canonical.New()
	err = c.assembleTarTo(id, uncompressed, &size, dgst.Hash())
	if err != nil {
		return
	}

	diffID = layer.DiffID(dgst.Digest())
	os.RemoveAll(newTarDataPath)
	err = os.Link(oldTarDataPath, newTarDataPath)
	return
}
Example #5
func (c *checksums) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID layer.DiffID, size int64, err error) {
	rawarchive, err := c.driver.TarStream(id, parent)
	if err != nil {
		return
	}
	defer rawarchive.Close()

	f, err := os.Create(newTarDataPath)
	if err != nil {
		return
	}
	defer f.Close()
	mfz := gzip.NewWriter(f)
	defer mfz.Close()
	metaPacker := storage.NewJSONPacker(mfz)

	packerCounter := &packSizeCounter{metaPacker, &size}

	archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil)
	if err != nil {
		return
	}
	dgst, err := digest.FromReader(archive)
	if err != nil {
		return
	}
	diffID = layer.DiffID(dgst)
	return
}
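Example #4 uses a deferred closure over named return values to fall back to a full re-checksum (Example #5) whenever the tar-split path fails. Below is a minimal, self-contained sketch of that defer-fallback shape; all function names and values are placeholders, not the checksums type's API.

package main

import (
	"errors"
	"fmt"
)

// fastChecksum stands in for the tar-split path, which can fail.
func fastChecksum(id string) (string, int64, error) {
	return "", 0, errors.New("no tar-split file")
}

// slowChecksum stands in for the full re-read fallback.
func slowChecksum(id string) (string, int64, error) {
	return "sha256:0123456789abcdef", 1024, nil
}

// checksumFor mirrors the shape of ChecksumForGraphID: the deferred closure
// inspects the named return values and, on error, swaps in the fallback
// result before the caller ever sees it.
func checksumFor(id string) (diffID string, size int64, err error) {
	defer func() {
		if err != nil {
			fmt.Printf("fast path failed for %q: %v; falling back\n", id, err)
			diffID, size, err = slowChecksum(id)
		}
	}()
	return fastChecksum(id)
}

func main() {
	diffID, size, err := checksumFor("layer1")
	fmt.Println(diffID, size, err)
}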
Example #6
func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) {
	var (
		parent layer.Layer
		err    error
	)

	if parentID != "" {
		parent, err = ls.Get(parentID)
		if err != nil {
			return nil, err
		}
	}

	l := &mockLayer{parent: parent}
	_, err = l.layerData.ReadFrom(reader)
	if err != nil {
		return nil, err
	}
	diffID, err := digest.FromBytes(l.layerData.Bytes())
	if err != nil {
		return nil, err
	}
	l.diffID = layer.DiffID(diffID)
	l.chainID = createChainIDFromParent(parentID, l.diffID)

	ls.layers[l.chainID] = l
	return l, nil
}
Example #7
func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
	for _, l := range layers {
		b, err := dm.blobStore.New()
		if err != nil {
			return initialRootFS, nil, err
		}
		defer b.Close()
		rc, _, err := l.Download(ctx, progressOutput)
		if err != nil {
			return initialRootFS, nil, errors.Wrap(err, "failed to download")
		}
		defer rc.Close()
		r := io.TeeReader(rc, b)
		inflatedLayerData, err := archive.DecompressStream(r)
		if err != nil {
			return initialRootFS, nil, err
		}
		digester := digest.Canonical.New()
		if _, err := archive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil {
			return initialRootFS, nil, err
		}
		initialRootFS.Append(layer.DiffID(digester.Digest()))
		d, err := b.Commit()
		if err != nil {
			return initialRootFS, nil, err
		}
		dm.blobs = append(dm.blobs, d)
	}
	return initialRootFS, nil, nil
}
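Example #7 tees each downloaded blob into the blob store while digesting the decompressed stream, and that digest becomes the layer DiffID appended to the root filesystem. A self-contained sketch of the tee-and-digest pattern over a gzip stream, using made-up payload data, is shown below.

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

func main() {
	// Build a small gzip-compressed payload standing in for a downloaded layer blob.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	if _, err := zw.Write([]byte("layer contents")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Tee the compressed bytes into a local copy (the "blob store") as they are read.
	var blobCopy bytes.Buffer
	tee := io.TeeReader(&compressed, &blobCopy)

	// Digest the decompressed stream; this digest is what becomes the layer DiffID.
	zr, err := gzip.NewReader(tee)
	if err != nil {
		panic(err)
	}
	h := sha256.New()
	if _, err := io.Copy(h, zr); err != nil {
		panic(err)
	}
	fmt.Printf("diff ID: sha256:%x\n", h.Sum(nil))
	fmt.Printf("compressed bytes captured: %d\n", blobCopy.Len())
}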
Example #8
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
	ids := r.DiffIDs
	if r.Type == TypeLayersWithBase {
		// Add an extra ID for the base.
		baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID())))
		ids = append([]layer.DiffID{baseDiffID}, ids...)
	}
	return layer.CreateChainID(ids)
}
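The chain ID above is derived purely from the ordered list of DiffIDs. As a rough illustration, the sketch below applies the documented chaining rule (hash of the previous chain ID, a space, and the next diff ID) using plain strings and crypto/sha256; the helper is hypothetical and not the layer package's CreateChainID.

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID applies the chaining rule: the chain ID of a single layer is its
// diff ID, and each further layer hashes the previous chain ID, a space,
// and its own diff ID.
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, diffID := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + diffID))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	fmt.Println(chainID([]string{
		"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
	}))
}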
Example #9
func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS {
	rootFS := image.RootFS{
		Type:    pluginfs.Type,
		DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)),
	}
	for i := range pluginfs.DiffIds {
		rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i])
	}

	return &rootFS
}
Example #10
func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor {
	return []DownloadDescriptor{
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id1",
			expectedDiffID:   layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id2",
			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id3",
			expectedDiffID:   layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id2",
			expectedDiffID:   layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"),
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id4",
			expectedDiffID:   layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"),
			simulateRetries:  1,
		},
		&mockDownloadDescriptor{
			currentDownloads: currentDownloads,
			id:               "id5",
			expectedDiffID:   layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"),
		},
	}
}
Example #11
func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
	return []UploadDescriptor{
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0},
	}
}
Example #12
func TestV2MetadataService(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	metadataStore, err := NewFSMetadataStore(tmpDir)
	if err != nil {
		t.Fatalf("could not create metadata store: %v", err)
	}
	V2MetadataService := NewV2MetadataService(metadataStore)

	tooManyBlobSums := make([]V2Metadata, 100)
	for i := range tooManyBlobSums {
		randDigest := randomDigest()
		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
	}

	testVectors := []struct {
		diffID   layer.DiffID
		metadata []V2Metadata
	}{
		{
			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
			metadata: []V2Metadata{
				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
			},
		},
		{
			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
			metadata: []V2Metadata{
				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
			},
		},
		{
			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
			metadata: tooManyBlobSums,
		},
	}

	// Set some associations
	for _, vec := range testVectors {
		for _, blobsum := range vec.metadata {
			err := V2MetadataService.Add(vec.diffID, blobsum)
			if err != nil {
				t.Fatalf("error calling Set: %v", err)
			}
		}
	}

	// Check the correct values are read back
	for _, vec := range testVectors {
		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
		if err != nil {
			t.Fatalf("error calling Get: %v", err)
		}
		expectedMetadataEntries := len(vec.metadata)
		if expectedMetadataEntries > 50 {
			expectedMetadataEntries = 50
		}
		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
			t.Fatal("Get returned incorrect layer ID")
		}
	}

	// Test GetMetadata on a nonexistent entry
	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Test GetDiffID on a nonexistent entry
	_, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Overwrite one of the entries and read it back
	err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0])
	if err != nil {
		t.Fatalf("error calling Add: %v", err)
	}
	diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest)
	if err != nil {
		t.Fatalf("error calling GetDiffID: %v", err)
	}
	if diffID != testVectors[1].diffID {
		t.Fatal("GetDiffID returned incorrect diffID")
	}
}
Example #13
func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
	defer func() {
		if err != nil {
			logrus.Errorf("migration failed for %v, err: %v", id, err)
		}
	}()

	jsonFile := filepath.Join(root, graphDirName, id, "json")
	imageJSON, err := ioutil.ReadFile(jsonFile)
	if err != nil {
		return err
	}
	var parent struct {
		Parent   string
		ParentID digest.Digest `json:"parent_id"`
	}
	if err := json.Unmarshal(imageJSON, &parent); err != nil {
		return err
	}
	if parent.Parent == "" && parent.ParentID != "" { // v1.9
		parent.Parent = parent.ParentID.Hex()
	}
	// compatibilityID for parent
	parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "parent"))
	if err == nil && len(parentCompatibilityID) > 0 {
		parent.Parent = string(parentCompatibilityID)
	}

	var parentID image.ID
	if parent.Parent != "" {
		var exists bool
		if parentID, exists = mappings[parent.Parent]; !exists {
			if err := migrateImage(parent.Parent, root, ls, is, ms, mappings); err != nil {
				// todo: fail or allow broken chains?
				return err
			}
			parentID = mappings[parent.Parent]
		}
	}

	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), filepath.Join(root, graphDirName, id, tarDataFileName))
	if err != nil {
		return err
	}
	logrus.Infof("migrated layer %s to %s", id, layer.DiffID())

	h, err := imagev1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	rootFS.Append(layer.DiffID())

	config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	strongID, err := is.Create(config)
	if err != nil {
		return err
	}
	logrus.Infof("migrated image %s to %s", id, strongID)

	if parentID != "" {
		if err := is.SetParent(strongID, parentID); err != nil {
			return err
		}
	}

	checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum"))
	if err == nil { // best effort
		dgst, err := digest.ParseDigest(string(checksum))
		if err == nil {
			blobSumService := metadata.NewBlobSumService(ms)
			blobSumService.Add(layer.DiffID(), dgst)
		}
	}
	_, err = ls.Release(layer)
	if err != nil {
		return err
	}

	mappings[id] = strongID
	return
}
Example #14
File: imagec.go Project: vmware/vic
// CreateImageConfig constructs the image metadata from layers that compose the image
func (ic *ImageC) CreateImageConfig(images []*ImageWithMeta) (metadata.ImageConfig, error) {

	imageLayer := images[0] // the layer that represents the actual image

	// if we already have an imageID associated with this layerID, we don't need
	// to calculate imageID and can just grab the image config from the cache
	id := cache.RepositoryCache().GetImageID(imageLayer.ID)
	if image, err := cache.ImageCache().Get(id); err == nil {
		return *image, nil
	}

	manifest := ic.ImageManifest
	image := docker.V1Image{}
	rootFS := docker.NewRootFS()
	history := make([]docker.History, 0, len(images))
	diffIDs := make(map[string]string)
	var size int64

	// step through layers to get command history and diffID from oldest to newest
	for i := len(images) - 1; i >= 0; i-- {
		layer := images[i]
		if err := json.Unmarshal([]byte(layer.Meta), &image); err != nil {
			return metadata.ImageConfig{}, fmt.Errorf("Failed to unmarshall layer history: %s", err)
		}
		h := docker.History{
			Created:   image.Created,
			Author:    image.Author,
			CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "),
			Comment:   image.Comment,
		}
		history = append(history, h)
		rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.DiffID))
		diffIDs[layer.DiffID] = layer.ID
		size += layer.Size
	}

	// result is constructed without unused fields
	result := docker.Image{
		V1Image: docker.V1Image{
			Comment:         image.Comment,
			Created:         image.Created,
			Container:       image.Container,
			ContainerConfig: image.ContainerConfig,
			DockerVersion:   image.DockerVersion,
			Author:          image.Author,
			Config:          image.Config,
			Architecture:    image.Architecture,
			OS:              image.OS,
		},
		RootFS:  rootFS,
		History: history,
	}

	bytes, err := result.MarshalJSON()
	if err != nil {
		return metadata.ImageConfig{}, fmt.Errorf("Failed to marshall image metadata: %s", err)
	}

	// calculate image ID
	sum := fmt.Sprintf("%x", sha256.Sum256(bytes))
	log.Infof("Image ID: sha256:%s", sum)

	// prepare metadata
	result.V1Image.Parent = image.Parent
	result.Size = size
	result.V1Image.ID = imageLayer.ID
	imageConfig := metadata.ImageConfig{
		V1Image: result.V1Image,
		ImageID: sum,
		// TODO: this will change when issue 1186 is
		// implemented -- only populate the digests when pulled by digest
		Digests:   []string{manifest.Digest},
		Tags:      []string{ic.Tag},
		Name:      manifest.Name,
		DiffIDs:   diffIDs,
		History:   history,
		Reference: ic.Reference,
	}

	return imageConfig, nil
}
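Example #14 derives the image ID by hashing the marshaled image config. The sketch below isolates just that hashing step over a stripped-down placeholder config struct; the fields shown are illustrative, not the full docker.Image layout.

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// imageConfig is a placeholder for the marshaled image config; only the
// hashing step is illustrated here.
type imageConfig struct {
	Architecture string   `json:"architecture"`
	OS           string   `json:"os"`
	DiffIDs      []string `json:"diff_ids"`
}

func main() {
	cfg := imageConfig{
		Architecture: "amd64",
		OS:           "linux",
		DiffIDs: []string{
			"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
		},
	}
	blob, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// The image ID is the hex SHA-256 of the marshaled config bytes.
	fmt.Printf("image ID: sha256:%x\n", sha256.Sum256(blob))
}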
Example #15
// CreateImageConfig constructs the image metadata from layers that compose the image
func CreateImageConfig(images []*ImageWithMeta, manifest *Manifest) error {

	if len(images) == 0 {
		return nil
	}

	imageLayer := images[0] // the layer that represents the actual image
	image := docker.V1Image{}
	rootFS := docker.NewRootFS()
	history := make([]docker.History, 0, len(images))
	diffIDs := make(map[string]string)
	var size int64

	// step through layers to get command history and diffID from oldest to newest
	for i := len(images) - 1; i >= 0; i-- {
		layer := images[i]
		if err := json.Unmarshal([]byte(layer.meta), &image); err != nil {
			return fmt.Errorf("Failed to unmarshall layer history: %s", err)
		}
		h := docker.History{
			Created:   image.Created,
			Author:    image.Author,
			CreatedBy: strings.Join(image.ContainerConfig.Cmd, " "),
			Comment:   image.Comment,
		}
		history = append(history, h)
		rootFS.DiffIDs = append(rootFS.DiffIDs, dockerLayer.DiffID(layer.diffID))
		diffIDs[layer.diffID] = layer.ID
		size += layer.size
	}

	// result is constructed without unused fields
	result := docker.Image{
		V1Image: docker.V1Image{
			Comment:         image.Comment,
			Created:         image.Created,
			Container:       image.Container,
			ContainerConfig: image.ContainerConfig,
			DockerVersion:   image.DockerVersion,
			Author:          image.Author,
			Config:          image.Config,
			Architecture:    image.Architecture,
			OS:              image.OS,
		},
		RootFS:  rootFS,
		History: history,
	}

	bytes, err := result.MarshalJSON()
	if err != nil {
		return fmt.Errorf("Failed to marshall image metadata: %s", err)
	}

	// calculate image ID
	sum := fmt.Sprintf("%x", sha256.Sum256(bytes))
	log.Infof("Image ID: sha256:%s", sum)

	// prepare metadata
	result.V1Image.Parent = image.Parent
	result.Size = size
	result.V1Image.ID = imageLayer.ID
	metaData := metadata.ImageConfig{
		V1Image: result.V1Image,
		ImageID: sum,
		// TODO: this will change when issue 1186 is
		// implemented -- only populate the digests when pulled by digest
		Digests: []string{manifest.Digest},
		Tags:    []string{options.tag},
		Name:    manifest.Name,
		DiffIDs: diffIDs,
		History: history,
	}

	blob, err := json.Marshal(metaData)
	if err != nil {
		return fmt.Errorf("Failed to marshal image metadata: %s", err)
	}

	// store metadata
	imageLayer.meta = string(blob)

	return nil
}
Example #16
func TestV1IDService(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "v1-id-service-test")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	metadataStore, err := NewFSMetadataStore(tmpDir)
	if err != nil {
		t.Fatalf("could not create metadata store: %v", err)
	}
	v1IDService := NewV1IDService(metadataStore)

	testVectors := []struct {
		registry string
		v1ID     string
		layerID  layer.DiffID
	}{
		{
			registry: "registry1",
			v1ID:     "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937",
			layerID:  layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
		},
		{
			registry: "registry2",
			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
			layerID:  layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
		},
		{
			registry: "registry1",
			v1ID:     "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e",
			layerID:  layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
		},
	}

	// Set some associations
	for _, vec := range testVectors {
		err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID)
		if err != nil {
			t.Fatalf("error calling Set: %v", err)
		}
	}

	// Check the correct values are read back
	for _, vec := range testVectors {
		layerID, err := v1IDService.Get(vec.v1ID, vec.registry)
		if err != nil {
			t.Fatalf("error calling Get: %v", err)
		}
		if layerID != vec.layerID {
			t.Fatal("Get returned incorrect layer ID")
		}
	}

	// Test Get on a nonexistent entry
	_, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1")
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Overwrite one of the entries and read it back
	err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID)
	if err != nil {
		t.Fatalf("error calling Set: %v", err)
	}
	layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry)
	if err != nil {
		t.Fatalf("error calling Get: %v", err)
	}
	if layerID != testVectors[1].layerID {
		t.Fatal("Get returned incorrect layer ID")
	}
}
Example #17
// Execute the scratch-n-push
func (s *DockerScratchPushStep) Execute(ctx context.Context, sess *core.Session) (int, error) {
	// This is clearly only relevant to docker so we're going to dig into the
	// transport internals a little bit to get the container ID
	dt := sess.Transport().(*DockerTransport)
	containerID := dt.containerID

	_, err := s.CollectArtifact(containerID)
	if err != nil {
		return -1, err
	}

	// layer.tar has an extra folder in it so we have to strip it :/
	artifactReader, err := os.Open(s.options.HostPath("layer.tar"))
	if err != nil {
		return -1, err
	}
	defer artifactReader.Close()

	layerFile, err := os.OpenFile(s.options.HostPath("real_layer.tar"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer layerFile.Close()

	dgst := digest.Canonical.New()
	mwriter := io.MultiWriter(layerFile, dgst.Hash())

	tr := tar.NewReader(artifactReader)
	tw := tar.NewWriter(mwriter)

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// finished the tarball
			break
		}

		if err != nil {
			return -1, err
		}

		// Skip the base dir
		if hdr.Name == "./" {
			continue
		}

		if strings.HasPrefix(hdr.Name, "output/") {
			hdr.Name = hdr.Name[len("output/"):]
		} else if strings.HasPrefix(hdr.Name, "source/") {
			hdr.Name = hdr.Name[len("source/"):]
		}

		if len(hdr.Name) == 0 {
			continue
		}

		if err := tw.WriteHeader(hdr); err != nil {
			return -1, err
		}
		_, err = io.Copy(tw, tr)
		if err != nil {
			return -1, err
		}
	}

	// Flush the tar writer so the trailer is written to the file and the digest.
	if err := tw.Close(); err != nil {
		return -1, err
	}

	digest := dgst.Digest()

	config := &container.Config{
		Cmd:          s.cmd,
		Entrypoint:   s.entrypoint,
		Hostname:     containerID[:16],
		WorkingDir:   s.workingDir,
		Volumes:      s.volumes,
		ExposedPorts: tranformPorts(s.ports),
	}

	// Make the JSON file we need
	t := time.Now()
	base := image.V1Image{
		Architecture: "amd64",
		Container:    containerID,
		ContainerConfig: container.Config{
			Hostname: containerID[:16],
		},
		DockerVersion: "1.10",
		Created:       t,
		OS:            "linux",
		Config:        config,
	}

	imageJSON := image.Image{
		V1Image: base,
		History: []image.History{image.History{Created: t}},
		RootFS: &image.RootFS{
			Type:    "layers",
			DiffIDs: []layer.DiffID{layer.DiffID(digest)},
		},
	}

	js, err := imageJSON.MarshalJSON()
	if err != nil {
		return -1, err
	}

	hash := sha256.New()
	hash.Write(js)
	layerID := hex.EncodeToString(hash.Sum(nil))

	err = os.MkdirAll(s.options.HostPath("scratch", layerID), 0755)
	if err != nil {
		return -1, err
	}

	layerFile.Close()

	err = os.Rename(layerFile.Name(), s.options.HostPath("scratch", layerID, "layer.tar"))
	if err != nil {
		return -1, err
	}
	defer os.RemoveAll(s.options.HostPath("scratch"))

	// VERSION file
	versionFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "VERSION"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer versionFile.Close()

	_, err = versionFile.Write([]byte("1.0"))
	if err != nil {
		return -1, err
	}

	err = versionFile.Sync()
	if err != nil {
		return -1, err
	}

	// json file
	jsonFile, err := os.OpenFile(s.options.HostPath("scratch", layerID, "json"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer jsonFile.Close()

	_, err = jsonFile.Write(js)
	if err != nil {
		return -1, err
	}

	err = jsonFile.Sync()
	if err != nil {
		return -1, err
	}

	// repositories file
	repositoriesFile, err := os.OpenFile(s.options.HostPath("scratch", "repositories"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return -1, err
	}
	defer repositoriesFile.Close()

	_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`{"%s":{`, s.authenticator.Repository(s.repository))))
	if err != nil {
		return -1, err
	}

	if len(s.tags) == 0 {
		s.tags = []string{"latest"}
	}

	for i, tag := range s.tags {
		_, err = repositoriesFile.Write([]byte(fmt.Sprintf(`"%s":"%s"`, tag, layerID)))
		if err != nil {
			return -1, err
		}
		if i != len(s.tags)-1 {
			_, err = repositoriesFile.Write([]byte{','})
			if err != nil {
				return -1, err
			}
		}
	}

	_, err = repositoriesFile.Write([]byte{'}', '}'})
	if err != nil {
		return -1, err
	}
	err = repositoriesFile.Sync()
	if err != nil {
		return -1, err
	}

	// Build our output tarball and start writing to it
	imageFile, err := os.Create(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer imageFile.Close()

	err = util.TarPath(imageFile, s.options.HostPath("scratch"))
	if err != nil {
		return -1, err
	}
	imageFile.Close()

	client, err := NewDockerClient(s.dockerOptions)
	if err != nil {
		return 1, err
	}

	// Check the auth
	if !s.dockerOptions.DockerLocal {
		check, err := s.authenticator.CheckAccess(s.repository, auth.Push)
		if !check || err != nil {
			s.logger.Errorln("Not allowed to interact with this repository:", s.repository)
			return -1, fmt.Errorf("Not allowed to interact with this repository: %s", s.repository)
		}
	}

	s.repository = s.authenticator.Repository(s.repository)
	s.logger.WithFields(util.LogFields{
		"Repository": s.repository,
		"Tags":       s.tags,
		"Message":    s.message,
	}).Debug("Scratch push to registry")

	// Okay, we can access it, do a docker load to import the image then push it
	loadFile, err := os.Open(s.options.HostPath("scratch.tar"))
	if err != nil {
		return -1, err
	}
	defer loadFile.Close()

	e, err := core.EmitterFromContext(ctx)
	if err != nil {
		return 1, err
	}

	err = client.LoadImage(docker.LoadImageOptions{InputStream: loadFile})
	if err != nil {
		return 1, err
	}

	return s.tagAndPush(layerID, e, client)
}
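The repositories file in Example #17 is assembled by hand-writing JSON fragments, which is easy to get wrong. A sketch of producing the same {"repo":{"tag":"layerID"}} shape with encoding/json follows; the repository name, tags, and layer ID are placeholder values.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder repository, tags, and layer ID.
	layerID := "3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880"
	repositories := map[string]map[string]string{
		"example/repo": {
			"latest": layerID,
			"v1.0":   layerID,
		},
	}
	blob, err := json.Marshal(repositories)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}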
Example #18
func TestCreateV2Manifest(t *testing.T) {
	imgJSON := `{
    "architecture": "amd64",
    "config": {
        "AttachStderr": false,
        "AttachStdin": false,
        "AttachStdout": false,
        "Cmd": [
            "/bin/sh",
            "-c",
            "echo hi"
        ],
        "Domainname": "",
        "Entrypoint": null,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "derived=true",
            "asdf=true"
        ],
        "Hostname": "23304fc829f9",
        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
        "Labels": {},
        "OnBuild": [],
        "OpenStdin": false,
        "StdinOnce": false,
        "Tty": false,
        "User": "",
        "Volumes": null,
        "WorkingDir": ""
    },
    "container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
    "container_config": {
        "AttachStderr": false,
        "AttachStdin": false,
        "AttachStdout": false,
        "Cmd": [
            "/bin/sh",
            "-c",
            "#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
        ],
        "Domainname": "",
        "Entrypoint": null,
        "Env": [
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
            "derived=true",
            "asdf=true"
        ],
        "Hostname": "23304fc829f9",
        "Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
        "Labels": {},
        "OnBuild": [],
        "OpenStdin": false,
        "StdinOnce": false,
        "Tty": false,
        "User": "",
        "Volumes": null,
        "WorkingDir": ""
    },
    "created": "2015-11-04T23:06:32.365666163Z",
    "docker_version": "1.9.0-dev",
    "history": [
        {
            "created": "2015-10-31T22:22:54.690851953Z",
            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
        },
        {
            "created": "2015-10-31T22:22:55.613815829Z",
            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
        },
        {
            "created": "2015-11-04T23:06:30.934316144Z",
            "created_by": "/bin/sh -c #(nop) ENV derived=true",
            "empty_layer": true
        },
        {
            "created": "2015-11-04T23:06:31.192097572Z",
            "created_by": "/bin/sh -c #(nop) ENV asdf=true",
            "empty_layer": true
        },
        {
            "created": "2015-11-04T23:06:32.083868454Z",
            "created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
        },
        {
            "created": "2015-11-04T23:06:32.365666163Z",
            "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
            "empty_layer": true
        }
    ],
    "os": "linux",
    "rootfs": {
        "diff_ids": [
            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
            "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
        ],
        "type": "layers"
    }
}`

	// To fill in rawJSON
	img, err := image.NewFromJSON([]byte(imgJSON))
	if err != nil {
		t.Fatalf("json decoding failed: %v", err)
	}

	fsLayers := map[layer.DiffID]digest.Digest{
		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
	}

	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
	if err != nil {
		t.Fatalf("CreateV2Manifest returned error: %v", err)
	}

	if manifest.Versioned.SchemaVersion != 1 {
		t.Fatal("SchemaVersion != 1")
	}
	if manifest.Name != "testrepo" {
		t.Fatal("incorrect name in manifest")
	}
	if manifest.Tag != "testtag" {
		t.Fatal("incorrect tag in manifest")
	}
	if manifest.Architecture != "amd64" {
		t.Fatal("incorrect arch in manifest")
	}

	expectedFSLayers := []schema1.FSLayer{
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
	}

	if len(manifest.FSLayers) != len(expectedFSLayers) {
		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
	}
	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
		t.Fatal("wrong FSLayers list")
	}

	expectedV1Compatibility := []string{
		`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
		`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
		`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
		`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
		`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
		`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
	}

	if len(manifest.History) != len(expectedV1Compatibility) {
		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
	}
	for i := range expectedV1Compatibility {
		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
		}
	}
}
Example #19
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
	baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
	return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
}
Example #20
	return digest.FromBytes([]byte(u.diffID.String()))
}

func uploadDescriptors(currentUploads *int32) []UploadDescriptor {
	return []UploadDescriptor{
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1},
		&mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0},
	}
}

var expectedDigests = map[layer.DiffID]digest.Digest{
	layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"): digest.Digest("sha256:c5095d6cf7ee42b7b064371dcc1dc3fb4af197f04d01a60009d484bd432724fc"),
	layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"): digest.Digest("sha256:968cbfe2ff5269ea1729b3804767a1f57ffbc442d3bc86f47edbf7e688a4f36e"),
	layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"): digest.Digest("sha256:8a5e56ab4b477a400470a7d5d4c1ca0c91235fd723ab19cc862636a06f3a735d"),
	layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"): digest.Digest("sha256:5e733e5cd3688512fc240bd5c178e72671c9915947d17bb8451750d827944cb2"),
	layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"): digest.Digest("sha256:ec4bb98d15e554a9f66c3ef9296cf46772c0ded3b1592bd8324d96e2f60f460c"),
}

func TestSuccessfulUpload(t *testing.T) {
	lum := NewLayerUploadManager(maxUploadConcurrency)

	progressChan := make(chan progress.Progress)
	progressDone := make(chan struct{})
	receivedProgress := make(map[string]int64)

	go func() {
		for p := range progressChan {
Example #21
func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) {
	defer func() {
		if err != nil {
			logrus.Errorf("migration failed for %v, err: %v", id, err)
		}
	}()

	parent, err := getParent(filepath.Join(root, graphDirName, id))
	if err != nil {
		return err
	}

	var parentID image.ID
	if parent != "" {
		var exists bool
		if parentID, exists = mappings[parent]; !exists {
			if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil {
				// todo: fail or allow broken chains?
				return err
			}
			parentID = mappings[parent]
		}
	}

	rootFS := image.NewRootFS()
	var history []image.History

	if parentID != "" {
		parentImg, err := is.Get(parentID)
		if err != nil {
			return err
		}

		rootFS = parentImg.RootFS
		history = parentImg.History
	}

	diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName))
	if err != nil {
		return err
	}
	diffID, err := digest.ParseDigest(string(diffIDData))
	if err != nil {
		return err
	}

	sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName))
	if err != nil {
		return err
	}
	size, err := strconv.ParseInt(string(sizeStr), 10, 64)
	if err != nil {
		return err
	}

	layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size)
	if err != nil {
		return err
	}
	logrus.Infof("migrated layer %s to %s", id, layer.DiffID())

	jsonFile := filepath.Join(root, graphDirName, id, "json")
	imageJSON, err := ioutil.ReadFile(jsonFile)
	if err != nil {
		return err
	}

	h, err := imagev1.HistoryFromConfig(imageJSON, false)
	if err != nil {
		return err
	}
	history = append(history, h)

	rootFS.Append(layer.DiffID())

	config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history)
	if err != nil {
		return err
	}
	strongID, err := is.Create(config)
	if err != nil {
		return err
	}
	logrus.Infof("migrated image %s to %s", id, strongID)

	if parentID != "" {
		if err := is.SetParent(strongID, parentID); err != nil {
			return err
		}
	}

	checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum"))
	if err == nil { // best effort
		dgst, err := digest.ParseDigest(string(checksum))
		if err == nil {
			V2MetadataService := metadata.NewV2MetadataService(ms)
			V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst})
		}
	}
	_, err = ls.Release(layer)
	if err != nil {
		return err
	}

	mappings[id] = strongID
	return
}
Example #22
func TestBlobSumService(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	metadataStore, err := NewFSMetadataStore(tmpDir)
	if err != nil {
		t.Fatalf("could not create metadata store: %v", err)
	}
	blobSumService := NewBlobSumService(metadataStore)

	testVectors := []struct {
		diffID   layer.DiffID
		blobsums []digest.Digest
	}{
		{
			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
			blobsums: []digest.Digest{
				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
			},
		},
		{
			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
			blobsums: []digest.Digest{
				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
			},
		},
		{
			diffID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
			blobsums: []digest.Digest{
				digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937"),
				digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e"),
				digest.Digest("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"),
				digest.Digest("sha256:8902a7ca89aabbb868835260912159026637634090dd8899eee969523252236e"),
				digest.Digest("sha256:c84364306344ccc48532c52ff5209236273525231dddaaab53262322352883aa"),
				digest.Digest("sha256:aa7583bbc87532a8352bbb72520a821b3623523523a8352523a52352aaa888fe"),
			},
		},
	}

	// Set some associations
	for _, vec := range testVectors {
		for _, blobsum := range vec.blobsums {
			err := blobSumService.Add(vec.diffID, blobsum)
			if err != nil {
				t.Fatalf("error calling Set: %v", err)
			}
		}
	}

	// Check the correct values are read back
	for _, vec := range testVectors {
		blobsums, err := blobSumService.GetBlobSums(vec.diffID)
		if err != nil {
			t.Fatalf("error calling Get: %v", err)
		}
		expectedBlobsums := len(vec.blobsums)
		if expectedBlobsums > 5 {
			expectedBlobsums = 5
		}
		if !reflect.DeepEqual(blobsums, vec.blobsums[len(vec.blobsums)-expectedBlobsums:len(vec.blobsums)]) {
			t.Fatal("Get returned incorrect layer ID")
		}
	}

	// Test GetBlobSums on a nonexistent entry
	_, err = blobSumService.GetBlobSums(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Test GetDiffID on a nonexistent entry
	_, err = blobSumService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
	if err == nil {
		t.Fatal("expected error looking up nonexistent entry")
	}

	// Overwrite one of the entries and read it back
	err = blobSumService.Add(testVectors[1].diffID, testVectors[0].blobsums[0])
	if err != nil {
		t.Fatalf("error calling Add: %v", err)
	}
	diffID, err := blobSumService.GetDiffID(testVectors[0].blobsums[0])
	if err != nil {
		t.Fatalf("error calling GetDiffID: %v", err)
	}
	if diffID != testVectors[1].diffID {
		t.Fatal("GetDiffID returned incorrect diffID")
	}
}