Example #1
// revisions returns all revisions with the specified name and tag.
func (ts *tagStore) revisions(tag string) ([]digest.Digest, error) {
	manifestTagIndexPath, err := ts.pm.path(manifestTagIndexPathSpec{
		name: ts.Name(),
		tag:  tag,
	})

	if err != nil {
		return nil, err
	}

	// TODO(stevvooe): Need to append digest alg to get listing of revisions.
	manifestTagIndexPath = path.Join(manifestTagIndexPath, "sha256")

	entries, err := ts.driver.List(ts.repository.ctx, manifestTagIndexPath)
	if err != nil {
		return nil, err
	}

	var revisions []digest.Digest
	for _, entry := range entries {
		revisions = append(revisions, digest.NewDigestFromHex("sha256", path.Base(entry)))
	}

	return revisions, nil
}
Example #2
File: trust.go Project: supasate/docker
func convertTarget(t client.Target) (target, error) {
	h, ok := t.Hashes["sha256"]
	if !ok {
		return target{}, errors.New("no valid hash, expecting sha256")
	}
	return target{
		reference: registry.ParseReference(t.Name),
		digest:    digest.NewDigestFromHex("sha256", hex.EncodeToString(h)),
		size:      t.Length,
	}, nil
}
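
Note: the pattern above (hex-encode the raw hash bytes, then build and validate a digest) recurs throughout these examples. Below is a minimal, self-contained sketch of it; the import path github.com/opencontainers/go-digest is an assumption here, and the older github.com/docker/distribution/digest package exposes the same NewDigestFromHex helper.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Stand-in for raw hash bytes such as t.Hashes["sha256"] in the example above.
	sum := sha256.Sum256([]byte("example payload"))

	// Build a digest from the hex encoding and validate it before use.
	dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(sum[:]))
	if err := dgst.Validate(); err != nil {
		fmt.Println("invalid digest:", err)
		return
	}
	fmt.Println(dgst) // "sha256:" followed by 64 hex characters
}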
Example #3
// digestFromPath reconstructs a digest from a path.
func digestFromPath(digestPath string) (digest.Digest, error) {
	digestPath = strings.TrimSuffix(digestPath, "/data")
	dir, hex := path.Split(digestPath)
	dir = path.Dir(dir)
	dir, next := path.Split(dir)

	// next is either the algorithm OR the first two characters in the hex string
	var algo string
	if next == hex[:2] {
		algo = path.Base(dir)
	} else {
		algo = next
	}

	dgst := digest.NewDigestFromHex(algo, hex)
	return dgst, dgst.Validate()
}
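
For illustration, a small driver for the helper above. It assumes digestFromPath and an fmt import are available in the same package; the hex value is the well-known SHA-256 of empty input, and the two paths exercise the prefixed and unprefixed layouts that the comment inside the function distinguishes.

func exampleDigestFromPath() {
	const hexSum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"

	paths := []string{
		// <root>/<algorithm>/<two-character prefix>/<hex>/data
		"/blobs/sha256/" + hexSum[:2] + "/" + hexSum + "/data",
		// <root>/<algorithm>/<hex>/data
		"/blobs/sha256/" + hexSum + "/data",
	}
	for _, p := range paths {
		dgst, err := digestFromPath(p)
		if err != nil {
			fmt.Println("unexpected error:", err)
			continue
		}
		fmt.Println(dgst) // sha256:e3b0c442... in both cases
	}
}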
Example #4
File: fs.go Project: CadeLaRen/docker-3
// Walk calls the supplied callback for each image ID in the storage backend.
func (s *fs) Walk(f IDWalkFunc) error {
	// Only Canonical digest (sha256) is currently supported
	s.RLock()
	dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical)))
	s.RUnlock()
	if err != nil {
		return err
	}
	for _, v := range dir {
		dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name())
		if err := dgst.Validate(); err != nil {
			logrus.Debugf("Skipping invalid digest %s: %s", dgst, err)
			continue
		}
		if err := f(ID(dgst)); err != nil {
			return err
		}
	}
	return nil
}
Example #5
File: filestore.go Project: leobcn/docker
func (fms *fileMetadataStore) List() ([]ChainID, []string, error) {
	var ids []ChainID
	for _, algorithm := range supportedAlgorithms {
		fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm)))
		if err != nil {
			if os.IsNotExist(err) {
				continue
			}
			return nil, nil, err
		}

		for _, fi := range fileInfos {
			if fi.IsDir() && fi.Name() != "mounts" {
				dgst := digest.NewDigestFromHex(string(algorithm), fi.Name())
				if err := dgst.Validate(); err != nil {
					logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name())
				} else {
					ids = append(ids, ChainID(dgst))
				}
			}
		}
	}

	fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts"))
	if err != nil {
		if os.IsNotExist(err) {
			return ids, []string{}, nil
		}
		return nil, nil, err
	}

	var mounts []string
	for _, fi := range fileInfos {
		if fi.IsDir() {
			mounts = append(mounts, fi.Name())
		}
	}

	return ids, mounts, nil
}
Example #6
func (cs *ContentStore) Walk(fn func(path string, dgst digest.Digest) error) error {
	root := filepath.Join(cs.root, "blobs")
	var alg digest.Algorithm
	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if !fi.IsDir() && !alg.Available() {
			return nil
		}

		// TODO(stevvooe): There are a few more cases with subdirs that should be
		// handled in case the layout gets corrupted. This isn't strict enough
		// and may spew bad data.

		if path == root {
			return nil
		} else if filepath.Dir(path) == root {
			alg = digest.Algorithm(filepath.Base(path))

			if !alg.Available() {
				alg = ""
				return filepath.SkipDir
			}

			// descending into a hash directory
			return nil
		}

		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
		if err := dgst.Validate(); err != nil {
			// log error but don't report
			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
			// if we see this, it could mean some sort of corruption of the
			// store or extra paths not expected previously.
		}

		return fn(path, dgst)
	})
}
Example #7
File: trust.go Project: harche/docker
func trustedResolveDigest(ctx context.Context, cli *command.DockerCli, ref reference.NamedTagged) (distreference.Canonical, error) {
	repoInfo, err := registry.ParseRepositoryInfo(ref)
	if err != nil {
		return nil, err
	}

	authConfig := command.ResolveAuthConfig(ctx, cli, repoInfo.Index)

	notaryRepo, err := trust.GetNotaryRepository(cli, repoInfo, authConfig, "pull")
	if err != nil {
		return nil, errors.Wrap(err, "error establishing connection to trust repository")
	}

	t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
	if err != nil {
		return nil, trust.NotaryError(repoInfo.FullName(), err)
	}
	// Only get the tag if it's in the top level targets role or the releases delegation role
	// ignore it if it's in any other delegation roles
	if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
		return nil, trust.NotaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String()))
	}

	logrus.Debugf("retrieving target for %s role\n", t.Role)
	h, ok := t.Hashes["sha256"]
	if !ok {
		return nil, errors.New("no valid hash, expecting sha256")
	}

	dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))

	// Using distribution reference package to make sure that adding a
	// digest does not erase the tag. When the two reference packages
	// are unified, this will no longer be an issue.
	return distreference.WithDigest(ref, dgst)
}
Example #8
File: walk.go Project: RomainVabre/origin
// makeBlobStoreWalkFunc returns a function for walking a blob store at a
// particular rootPath. The returned function calls a given ingest callback on
// each digest found. The blob store is expected to have the following layout:
//
//     if multilevel is true:
//       <rootPath>/<alg>/<prefix>/<digest>
//       <rootPath>/tarsum/<version>/<alg>/<prefix>/<digest>
//     otherwise:
//       <rootPath>/<alg>/<digest>
//       <rootPath>/tarsum/<version>/<alg>/<digest>
func makeBlobStoreWalkFunc(rootPath string, multilevel bool, ingest func(digest.Digest) error) (WalkFn, error) {
	var (
		// number of slashes in a path to a full digest directory under a rootPath
		blobRefPathSepCount       int
		blobTarsumRefPathSepCount int
	)

	if multilevel {
		// <alg>/<prefix>/<digest>
		blobRefPathSepCount = 2
		// tarsum/<version>/<alg>/<prefix>/<digest>
		blobTarsumRefPathSepCount = 4
	} else {
		// <alg>/<digest>
		blobRefPathSepCount = 1
		// tarsum/<version>/<alg>/<digest>
		blobTarsumRefPathSepCount = 3
	}

	return func(fi storageDriver.FileInfo) error {
		if !fi.IsDir() {
			// ignore files
			return nil
		}

		// trim <rootPath>/ prefix
		pth := strings.TrimPrefix(strings.TrimPrefix(fi.Path(), rootPath), "/")
		sepCount := strings.Count(pth, "/")

		if sepCount < blobRefPathSepCount {
			// the path is too short to contain a digest
			return nil
		}

		alg := ""
		tarsumParts := reTarsumPrefix.FindStringSubmatch(pth)
		isTarsum := len(tarsumParts) > 0
		if sepCount > blobTarsumRefPathSepCount || (!isTarsum && sepCount > blobRefPathSepCount) {
			// too many path components
			return ErrSkipDir
		}

		if len(tarsumParts) > 0 {
			alg = "tarsum." + tarsumParts[1] + "+"
			// trim "tarsum/<version>/" prefix from path
			pth = strings.TrimPrefix(pth[len(tarsumParts[0]):], "/")
		}

		digestParts := reDigestPath.FindStringSubmatch(pth)
		if len(digestParts) > 0 {
			alg += digestParts[1]
			dgstHex := digestParts[2]
			dgst := digest.NewDigestFromHex(alg, dgstHex)
			// append only valid digests
			if err := dgst.Validate(); err == nil {
				err := ingest(dgst)
				if err != nil {
					return ErrFinishedWalk
				}
			}
			return ErrSkipDir
		}

		return nil
	}, nil
}
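
The walk function above relies on two package-level regular expressions, reTarsumPrefix and reDigestPath, that are not part of this excerpt. The definitions below are hypothetical stand-ins (assuming a regexp import), written only to be consistent with how the capture groups are indexed above: tarsumParts[1] as the tarsum version, digestParts[1] as the algorithm, and digestParts[2] as the hex digest. The project's real patterns may differ.

var (
	// Hypothetical: matches a leading "tarsum/<version>" segment and captures the version.
	reTarsumPrefix = regexp.MustCompile(`^tarsum/(\w+)`)

	// Hypothetical: matches "<alg>/<hex>" or "<alg>/<prefix>/<hex>" and captures
	// the algorithm and the hex digest.
	reDigestPath = regexp.MustCompile(`^([a-z0-9]+)(?:/[a-f0-9]{2})?/([a-f0-9]+)$`)
)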