Example #1
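// makeLayer assembles a tar stream holding a single file of random data,
// optionally compressing it, and returns the stream's digest, its
// uncompressed size, its stored (possibly compressed) size, and its contents.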
func makeLayer(t *testing.T, compression archive.Compression) (ddigest.Digest, int64, int64, []byte) {
	var cwriter io.WriteCloser
	var uncompressed *ioutils.WriteCounter
	var twriter *tar.Writer
	preader, pwriter := io.Pipe()
	tbuffer := bytes.Buffer{}
	if compression != archive.Uncompressed {
		compressor, err := archive.CompressStream(pwriter, compression)
		if err != nil {
			t.Fatalf("Error compressing layer: %v", err)
		}
		cwriter = compressor
		uncompressed = ioutils.NewWriteCounter(cwriter)
	} else {
		uncompressed = ioutils.NewWriteCounter(pwriter)
	}
	twriter = tar.NewWriter(uncompressed)
	buf := make([]byte, layerSize)
	n, err := rand.Read(buf)
	if err != nil {
		t.Fatalf("Error reading tar data: %v", err)
	}
	if n != len(buf) {
		t.Fatalf("Short read reading tar data: %d < %d", n, len(buf))
	}
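	// Zero out a 1 KiB stretch of the otherwise-random buffer so that the
	// payload is at least somewhat compressible.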
	for i := 1024; i < 2048; i++ {
		buf[i] = 0
	}
	go func() {
		defer pwriter.Close()
		if cwriter != nil {
			defer cwriter.Close()
		}
		defer twriter.Close()
		// t.Fatalf must not be called from a goroutine other than the one
		// running the test, so failures here are reported with t.Errorf.
		err := twriter.WriteHeader(&tar.Header{
			Name:       "/random-single-file",
			Mode:       0600,
			Size:       int64(len(buf)),
			ModTime:    time.Now(),
			AccessTime: time.Now(),
			ChangeTime: time.Now(),
			Typeflag:   tar.TypeReg,
		})
		if err != nil {
			t.Errorf("Error writing tar header: %v", err)
			return
		}
		n, err := twriter.Write(buf)
		if err != nil {
			t.Errorf("Error writing tar data: %v", err)
			return
		}
		if n != len(buf) {
			t.Errorf("Short write writing tar data: %d < %d", n, len(buf))
		}
	}()
	_, err = io.Copy(&tbuffer, preader)
	if err != nil {
		t.Fatalf("Error reading layer tar: %v", err)
	}
	sum := ddigest.SHA256.FromBytes(tbuffer.Bytes())
	return sum, uncompressed.Count, int64(tbuffer.Len()), tbuffer.Bytes()
}
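For reference, a minimal sketch of how the four return values fit together (a hypothetical checkMakeLayer helper, using the same imports as the example above): the digest and size describe the blob as stored, while the uncompressed count tracks the tar stream before compression.

// checkMakeLayer is a hypothetical helper illustrating makeLayer's contract.
func checkMakeLayer(t *testing.T) {
	sum, uncompressedSize, size, blob := makeLayer(t, archive.Gzip)
	// The digest is computed over the stored (compressed) bytes.
	if ddigest.SHA256.FromBytes(blob) != sum {
		t.Fatalf("digest %q does not match blob contents", sum)
	}
	if int64(len(blob)) != size {
		t.Fatalf("reported size %d does not match blob length %d", size, len(blob))
	}
	t.Logf("layer %s: %d bytes compressed, %d uncompressed", sum, size, uncompressedSize)
}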
Example #2
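// TestWriteRead round-trips an image (a config blob, one randomly-generated
// layer, a manifest, and signatures) through a storage image destination and
// reads it back through an image source.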
func TestWriteRead(t *testing.T) {
	if os.Geteuid() != 0 {
		t.Skip("TestWriteRead requires root privileges")
	}

	config := `{"config":{"labels":{}},"created":"2006-01-02T15:04:05Z"}`
	sum := ddigest.SHA256.FromBytes([]byte(config))
	configInfo := types.BlobInfo{
		Digest: sum,
		Size:   int64(len(config)),
	}
	manifests := []string{
		//`{
		//    "schemaVersion": 2,
		//    "mediaType": "application/vnd.oci.image.manifest.v1+json",
		//    "config": {
		//	"mediaType": "application/vnd.oci.image.serialization.config.v1+json",
		//	"size": %cs,
		//	"digest": "%ch"
		//    },
		//    "layers": [
		//	{
		//	    "mediaType": "application/vnd.oci.image.serialization.rootfs.tar.gzip",
		//	    "digest": "%lh",
		//	    "size": %ls
		//	}
		//    ]
		//}`,
		`{
		    "schemaVersion": 1,
		    "name": "test",
		    "tag": "latest",
		    "architecture": "amd64",
		    "fsLayers": [
			{
			    "blobSum": "%lh"
			}
		    ],
		    "history": [
			{
				"v1Compatibility": "{\"id\":\"%li\",\"created\":\"2016-03-03T11:29:44.222098366Z\",\"container\":\"\",\"container_config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.8.2-fc22\",\"author\":\"\\\"William Temple \\u003cwtemple at redhat dot com\\u003e\\\"\",\"config\":{\"Hostname\":\"56f0fe1dfc95\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"ExposedPorts\":null,\"PublishService\":\"\",\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":%ls}"
			}
		    ]
		}`,
		`{
		    "schemaVersion": 2,
		    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
		    "config": {
			"mediaType": "application/vnd.docker.container.image.v1+json",
			"size": %cs,
			"digest": "%ch"
		    },
		    "layers": [
			{
			    "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
			    "digest": "%lh",
			    "size": %ls
			}
		    ]
		}`,
	}
	signatures := [][]byte{
		[]byte("Signature A"),
		[]byte("Signature B"),
	}
	newStore(t)
	ref, err := Transport.ParseReference("test")
	if err != nil {
		t.Fatalf("ParseReference(%q) returned error %v", "test", err)
	}
	if ref == nil {
		t.Fatalf("ParseReference returned nil reference")
	}

	for _, manifestFmt := range manifests {
		dest, err := ref.NewImageDestination(systemContext())
		if err != nil {
			t.Fatalf("NewImageDestination(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		if dest == nil {
			t.Fatalf("NewImageDestination(%q) returned no destination", ref.StringWithinTransport())
		}
		if dest.Reference().StringWithinTransport() != ref.StringWithinTransport() {
			t.Fatalf("NewImageDestination(%q) changed the reference to %q", ref.StringWithinTransport(), dest.Reference().StringWithinTransport())
		}
		t.Logf("supported manifest MIME types: %v", dest.SupportedManifestMIMETypes())
		if err := dest.SupportsSignatures(); err != nil {
			t.Fatalf("Destination image doesn't support signatures: %v", err)
		}
		t.Logf("compress layers: %v", dest.ShouldCompressLayers())
		compression := archive.Uncompressed
		if dest.ShouldCompressLayers() {
			compression = archive.Gzip
		}
		digest, decompressedSize, size, blob := makeLayer(t, compression)
		if _, err := dest.PutBlob(bytes.NewBuffer(blob), types.BlobInfo{
			Size:   size,
			Digest: digest,
		}); err != nil {
			t.Fatalf("Error saving randomly-generated layer to destination: %v", err)
		}
		t.Logf("Wrote randomly-generated layer %q (%d/%d bytes) to destination", digest, size, decompressedSize)
		if _, err := dest.PutBlob(bytes.NewBufferString(config), configInfo); err != nil {
			t.Fatalf("Error saving config to destination: %v", err)
		}
		manifest := strings.Replace(manifestFmt, "%lh", digest.String(), -1)
		manifest = strings.Replace(manifest, "%ch", configInfo.Digest.String(), -1)
		manifest = strings.Replace(manifest, "%ls", fmt.Sprintf("%d", size), -1)
		manifest = strings.Replace(manifest, "%cs", fmt.Sprintf("%d", configInfo.Size), -1)
		li := digest.Hex()
		manifest = strings.Replace(manifest, "%li", li, -1)
		manifest = strings.Replace(manifest, "%ci", sum.Hex(), -1)
		t.Logf("this manifest is %q", manifest)
		if err := dest.PutManifest([]byte(manifest)); err != nil {
			t.Fatalf("Error saving manifest to destination: %v", err)
		}
		if err := dest.PutSignatures(signatures); err != nil {
			t.Fatalf("Error saving signatures to destination: %v", err)
		}
		if err := dest.Commit(); err != nil {
			t.Fatalf("Error committing changes to destination: %v", err)
		}
		dest.Close()

		img, err := ref.NewImage(systemContext())
		if err != nil {
			t.Fatalf("NewImage(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		imageConfigInfo := img.ConfigInfo()
		if imageConfigInfo.Digest != "" {
			blob, err := img.ConfigBlob()
			if err != nil {
				t.Fatalf("image %q claimed there was a config blob, but couldn't produce it: %v", ref.StringWithinTransport(), err)
			}
			sum := ddigest.SHA256.FromBytes(blob)
			if sum != configInfo.Digest {
				t.Fatalf("image config blob digest for %q doesn't match", ref.StringWithinTransport())
			}
			if int64(len(blob)) != configInfo.Size {
				t.Fatalf("image config size for %q changed from %d to %d", ref.StringWithinTransport(), configInfo.Size, len(blob))
			}
		}
		layerInfos := img.LayerInfos()
		if layerInfos == nil {
			t.Fatalf("image for %q returned empty layer list", ref.StringWithinTransport())
		}
		imageInfo, err := img.Inspect()
		if err != nil {
			t.Fatalf("Inspect(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		if imageInfo.Created.IsZero() {
			t.Fatalf("Image %q claims to have been created at time 0", ref.StringWithinTransport())
		}

		src, err := ref.NewImageSource(systemContext(), []string{})
		if err != nil {
			t.Fatalf("NewImageSource(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		if src == nil {
			t.Fatalf("NewImageSource(%q) returned no source", ref.StringWithinTransport())
		}
		if src.Reference().StringWithinTransport() != ref.StringWithinTransport() {
			// As long as it's only the addition of an ID suffix, that's okay.
			if !strings.HasPrefix(src.Reference().StringWithinTransport(), ref.StringWithinTransport()+"@") {
				t.Fatalf("NewImageSource(%q) changed the reference to %q", ref.StringWithinTransport(), src.Reference().StringWithinTransport())
			}
		}
		retrievedManifest, manifestType, err := src.GetManifest()
		if err != nil {
			t.Fatalf("GetManifest(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		t.Logf("this manifest's type appears to be %q", manifestType)
		if string(retrievedManifest) != manifest {
			t.Fatalf("NewImageSource(%q) changed the manifest: %q was %q", ref.StringWithinTransport(), string(retrievedManifest), manifest)
		}
		sum = ddigest.SHA256.FromBytes([]byte(manifest))
		_, _, err = src.GetTargetManifest(sum)
		if err == nil {
			t.Fatalf("GetTargetManifest(%q) is supposed to fail", ref.StringWithinTransport())
		}
		sigs, err := src.GetSignatures()
		if err != nil {
			t.Fatalf("GetSignatures(%q) returned error %v", ref.StringWithinTransport(), err)
		}
		if len(sigs) < len(signatures) {
			t.Fatalf("Lost %d signatures", len(signatures)-len(sigs))
		}
		if len(sigs) > len(signatures) {
			t.Fatalf("Gained %d signatures", len(sigs)-len(signatures))
		}
		for i := range sigs {
			if !bytes.Equal(sigs[i], signatures[i]) {
				t.Fatalf("Signature %d was corrupted", i)
			}
		}
		for _, layerInfo := range layerInfos {
			buf := bytes.Buffer{}
			layer, size, err := src.GetBlob(layerInfo)
			if err != nil {
				t.Fatalf("Error reading layer %q from %q", layerInfo.Digest, ref.StringWithinTransport())
			}
			t.Logf("Decompressing blob %q, blob size = %d, layerInfo.Size = %d bytes", layerInfo.Digest, size, layerInfo.Size)
			hasher := sha256.New()
			compressed := ioutils.NewWriteCounter(hasher)
			countedLayer := io.TeeReader(layer, compressed)
			decompressed, err := archive.DecompressStream(countedLayer)
			if err != nil {
				t.Fatalf("Error decompressing layer %q from %q", layerInfo.Digest, ref.StringWithinTransport())
			}
			n, err := io.Copy(&buf, decompressed)
			if layerInfo.Size >= 0 && compressed.Count != layerInfo.Size {
				t.Fatalf("Blob size is different than expected: %d != %d, read %d", compressed.Count, layerInfo.Size, n)
			}
			if size >= 0 && compressed.Count != size {
				t.Fatalf("Blob size mismatch: %d != %d, read %d", compressed.Count, size, n)
			}
			sum := hasher.Sum(nil)
			if ddigest.NewDigestFromBytes(ddigest.SHA256, sum) != layerInfo.Digest {
				t.Fatalf("Layer blob digest for %q doesn't match", ref.StringWithinTransport())
			}
		}
		src.Close()
		img.Close()
		err = ref.DeleteImage(systemContext())
		if err != nil {
			t.Fatalf("DeleteImage(%q) returned error %v", ref.StringWithinTransport(), err)
		}
	}
}
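The %lh/%ls, %ch/%cs, and %li/%ci placeholders above are filled in with a chain of strings.Replace calls; a hypothetical helper could perform the same substitutions in one pass with strings.NewReplacer:

// fillManifest is a hypothetical helper mirroring the strings.Replace chain
// in TestWriteRead: %lh/%ls are the layer digest and size, %ch/%cs the config
// digest and size, and %li/%ci the corresponding hex IDs.
func fillManifest(tmpl string, layerDigest, configDigest ddigest.Digest, layerSize, configSize int64) string {
	return strings.NewReplacer(
		"%lh", layerDigest.String(),
		"%ls", fmt.Sprintf("%d", layerSize),
		"%ch", configDigest.String(),
		"%cs", fmt.Sprintf("%d", configSize),
		"%li", layerDigest.Hex(),
		"%ci", configDigest.Hex(),
	).Replace(tmpl)
}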
Example #3
// PutBlob is used to both store filesystem layers and binary data that is part
// of the image.  Filesystem layers are assumed to be imported in order, as
// that is required by some of the underlying storage drivers.
func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
	blobSize := int64(-1)
	digest := blobinfo.Digest
	errorBlobInfo := types.BlobInfo{
		Digest: "",
		Size:   -1,
	}
	// Try to read an initial snippet of the blob, large enough for
	// archive.IsArchive below to sniff whether this is a filesystem layer.
	header := make([]byte, 10240)
	n, err := stream.Read(header)
	if err != nil && err != io.EOF {
		return errorBlobInfo, err
	}
	// Set up to read the whole blob (the initial snippet, plus the rest)
	// while digesting it with either the default, or the passed-in digest,
	// if one was specified.
	hasher := ddigest.Canonical.Digester()
	if digest.Validate() == nil {
		if a := digest.Algorithm(); a.Available() {
			hasher = a.Digester()
		}
	}
	hash := ""
	counter := ioutils.NewWriteCounter(hasher.Hash())
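	// Stitch the already-read snippet back onto the front of the stream, and
	// tee everything we read from it through the counting digester.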
	defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
	multi := io.TeeReader(defragmented, counter)
	if (n > 0) && archive.IsArchive(header[:n]) {
		// It's a filesystem layer.  If it's not the first one in the
		// image, we assume that the most recently added layer is its
		// parent.
		parentLayer := ""
		for _, blob := range s.BlobList {
			if layerList, ok := s.Layers[blob.Digest]; ok {
				parentLayer = layerList[len(layerList)-1]
			}
		}
		// If we have an expected content digest, generate a layer ID
		// based on the parent's ID and the expected content digest.
		id := ""
		if digest.Validate() == nil {
			id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
		}
		// Attempt to create the identified layer and import its contents.
		layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
		if err != nil && err != storage.ErrDuplicateID {
			logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
			return errorBlobInfo, err
		}
		if err == storage.ErrDuplicateID {
			// We specified an ID, and there's already a layer with
			// the same ID.  Drain the input so that we can look at
			// its length and digest.
			_, err := io.Copy(ioutil.Discard, multi)
			if err != nil && err != io.EOF {
				logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err)
				return errorBlobInfo, err
			}
			hash = hasher.Digest().String()
		} else {
			// Applied the layer with the specified ID.  Note the
			// size info and computed digest.
			hash = hasher.Digest().String()
			layerMeta := storageLayerMetadata{
				Digest:         hash,
				CompressedSize: counter.Count,
				Size:           uncompressedSize,
			}
			if metadata, err := json.Marshal(&layerMeta); err == nil && len(metadata) != 0 {
				s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
			}
			// Hang on to the new layer's ID.
			id = layer.ID
		}
		blobSize = counter.Count
		// Check if the size looks right.
		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
			if layer != nil {
				// Something's wrong; delete the newly-created layer.
				s.imageRef.transport.store.DeleteLayer(layer.ID)
			}
			return errorBlobInfo, ErrBlobSizeMismatch
		}
		// If the content digest was specified, verify it.
		if digest.Validate() == nil && digest.String() != hash {
			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
			if layer != nil {
				// Something's wrong; delete the newly-created layer.
				s.imageRef.transport.store.DeleteLayer(layer.ID)
			}
			return errorBlobInfo, ErrBlobDigestMismatch
		}
		// If we didn't get a digest, construct one.
		if digest == "" {
			digest = ddigest.Digest(hash)
		}
		// Record that this layer blob is a layer, and the layer ID it
		// ended up having.  This is a list, in case the same blob is
		// being applied more than once.
		s.Layers[digest] = append(s.Layers[digest], id)
		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
		if layer != nil {
			logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
		} else {
			logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
		}
	} else {
		// It's just data.  Finish scanning it in, check that our
		// computed digest matches the passed-in digest, and store it,
		// but leave it out of the blob-to-layer-ID map so that we can
		// tell that it's not a layer.
		blob, err := ioutil.ReadAll(multi)
		if err != nil && err != io.EOF {
			return errorBlobInfo, err
		}
		blobSize = int64(len(blob))
		hash = hasher.Digest().String()
		if blobinfo.Size >= 0 && blobSize != blobinfo.Size {
			logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, blobSize, blobinfo.Size)
			return errorBlobInfo, ErrBlobSizeMismatch
		}
		// If we were given a digest, verify that the content matches
		// it.
		if digest.Validate() == nil && digest.String() != hash {
			logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
			return errorBlobInfo, ErrBlobDigestMismatch
		}
		// If we didn't get a digest, construct one.
		if digest == "" {
			digest = ddigest.Digest(hash)
		}
		// Save the blob for when we Commit().
		s.BlobData[digest] = blob
		s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: blobSize})
		logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
	}
	return types.BlobInfo{
		Digest: digest,
		Size:   blobSize,
	}, nil
}
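A minimal usage sketch, assuming a destination implementing the types.ImageDestination interface shown in Example #2: because PutBlob digests and counts the stream itself, a caller may pass an empty digest and a size of -1 and rely on the returned BlobInfo instead.

// putAnyBlob is a hypothetical caller that streams a blob whose size and
// digest are unknown up front, letting PutBlob compute both.
func putAnyBlob(dest types.ImageDestination, blob []byte) (types.BlobInfo, error) {
	// An unvalidatable digest and a negative size skip the verification
	// branches above; the returned BlobInfo carries the computed values.
	return dest.PutBlob(bytes.NewReader(blob), types.BlobInfo{Digest: "", Size: -1})
}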