Example #1
File: image.go  Project: Ch3ck/image
// getLayer streams the layer blob identified by digest from the image's source into dest.
func (i *genericImage) getLayer(dest types.ImageDestination, digest string) error {
	stream, _, err := i.src.GetBlob(digest) // the ignored second value is the blob size
	if err != nil {
		return err
	}
	defer stream.Close()
	return dest.PutBlob(digest, stream)
}
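getLayer copies a single blob; a caller still has to walk the image's layer digests. Below is a minimal sketch of such a caller, assuming it lives in the same package; the copyLayers name and the layerDigests parameter are invented for illustration and are not part of the project's API.

func (i *genericImage) copyLayers(dest types.ImageDestination, layerDigests []string) error {
	// Stream each layer blob from the image's source into dest, aborting on the
	// first failure; getLayer closes each source stream via its deferred Close.
	for _, d := range layerDigests {
		if err := i.getLayer(dest, d); err != nil {
			return err
		}
	}
	return nil
}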
Example #2
// convertToManifestSchema1 builds a Docker schema 1 image from the schema 2 config and layers,
// uploading the gzipped empty layer to dest for empty-layer history entries, and returns the
// result as an in-memory image.
// Based on docker/distribution/manifest/schema1/config_builder.go
func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) {
	configBytes, err := m.ConfigBlob()
	if err != nil {
		return nil, err
	}
	imageConfig := &image{}
	if err := json.Unmarshal(configBytes, imageConfig); err != nil {
		return nil, err
	}

	// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
	fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
	history := make([]historySchema1, len(imageConfig.History))
	nonemptyLayerIndex := 0
	var parentV1ID string // Set in the loop
	v1ID := ""
	haveGzippedEmptyLayer := false
	if len(imageConfig.History) == 0 {
		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
	}
	for v2Index, historyEntry := range imageConfig.History {
		parentV1ID = v1ID
		v1Index := len(imageConfig.History) - 1 - v2Index

		var blobDigest digest.Digest
		if historyEntry.EmptyLayer {
			if !haveGzippedEmptyLayer {
				logrus.Debugf("Uploading empty layer during conversion to schema 1")
				info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
				if err != nil {
					return nil, errors.Wrap(err, "Error uploading empty layer")
				}
				if info.Digest != gzippedEmptyLayerDigest {
					return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
				}
				haveGzippedEmptyLayer = true
			}
			blobDigest = gzippedEmptyLayerDigest
		} else {
			if nonemptyLayerIndex >= len(m.LayersDescriptors) {
				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
			}
			blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
			nonemptyLayerIndex++
		}

		// AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
		v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
		if err != nil {
			return nil, err
		}
		v1ID = v

		fakeImage := v1Compatibility{
			ID:        v1ID,
			Parent:    parentV1ID,
			Comment:   historyEntry.Comment,
			Created:   historyEntry.Created,
			Author:    historyEntry.Author,
			ThrowAway: historyEntry.EmptyLayer,
		}
		fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
		v1CompatibilityBytes, err := json.Marshal(&fakeImage)
		if err != nil {
			return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
		}

		fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
		history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
		// Note that parentV1ID of the top layer is preserved when exiting this loop
	}

	// Now patch in real configuration for the top layer (v1Index == 0)
	v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
	if err != nil {
		return nil, err
	}
	v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
	if err != nil {
		return nil, err
	}
	history[0].V1Compatibility = string(v1Config)

	m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
	return memoryImageFromManifest(m1), nil
}
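A conversion like this is typically wanted only when the destination cannot accept a schema 2 manifest. The sketch below is a hypothetical caller, not the library's actual dispatch logic: the convertForDestination name is invented, and it assumes the same package, that types.ImageDestination exposes SupportedManifestMIMETypes (which may differ between versions of the library), and that a *manifestSchema2 can be passed to memoryImageFromManifest like the schema 1 manifest above.

func convertForDestination(m *manifestSchema2, dest types.ImageDestination) (types.Image, error) {
	// If dest already advertises schema 2 support, wrap the existing manifest
	// without converting; otherwise fall back to the schema 1 conversion above.
	for _, mt := range dest.SupportedManifestMIMETypes() {
		if mt == manifest.DockerV2Schema2MediaType {
			return memoryImageFromManifest(m), nil
		}
	}
	return m.convertToManifestSchema1(dest)
}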