Code Example #1
File: public.go Project: pombredanne/aptly
// RemoveDirs removes directory structure under public path
func (storage *PublishedStorage) RemoveDirs(path string, progress aptly.Progress) error {
	filepath := filepath.Join(storage.rootPath, path)
	if progress != nil {
		progress.Printf("Removing %s...\n", filepath)
	}
	return os.RemoveAll(filepath)
}
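
The interesting bit of this example is the optional progress reporter: the nil check lets callers skip reporting entirely before the destructive os.RemoveAll. Below is a minimal, self-contained sketch of that pattern; the progressPrinter interface and consoleProgress type are stand-ins invented for illustration, not aptly's aptly.Progress.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// progressPrinter is a stand-in for the aptly.Progress dependency used above.
type progressPrinter interface {
	Printf(msg string, a ...interface{})
}

// consoleProgress prints progress messages straight to stdout.
type consoleProgress struct{}

func (consoleProgress) Printf(msg string, a ...interface{}) { fmt.Printf(msg, a...) }

// removeDirs mirrors the shape of PublishedStorage.RemoveDirs: optional progress, then RemoveAll.
func removeDirs(rootPath, path string, progress progressPrinter) error {
	target := filepath.Join(rootPath, path)
	if progress != nil {
		progress.Printf("Removing %s...\n", target)
	}
	return os.RemoveAll(target)
}

func main() {
	root, _ := os.MkdirTemp("", "public")
	defer os.RemoveAll(root)
	_ = os.MkdirAll(filepath.Join(root, "dists", "wheezy"), 0755)

	// A nil progressPrinter is also accepted; the nil check simply skips the message.
	if err := removeDirs(root, "dists", consoleProgress{}); err != nil {
		fmt.Println("error:", err)
	}
}
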
Code Example #2
File: publish.go Project: ryanuber/aptly
// CleanupPrefixComponentFiles removes all unreferenced files in published storage under prefix/component pair
func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix, component string,
	publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {

	var err error
	referencedFiles := []string{}

	if progress != nil {
		progress.Printf("Cleaning up prefix %#v component %#v...\n", prefix, component)
	}

	for _, r := range collection.list {
		if r.Prefix == prefix && r.Component == component {
			err = collection.LoadComplete(r, collectionFactory)
			if err != nil {
				return err
			}

			packageList, err := NewPackageListFromRefList(r.RefList(), collectionFactory.PackageCollection(), progress)
			if err != nil {
				return err
			}

			packageList.ForEach(func(p *Package) error {
				poolDir, err := p.PoolDirectory()
				if err != nil {
					return err
				}

				for _, f := range p.Files() {
					referencedFiles = append(referencedFiles, filepath.Join(poolDir, f.Filename))
				}

				return nil
			})
		}
	}

	sort.Strings(referencedFiles)

	rootPath := filepath.Join(prefix, "pool", component)
	existingFiles, err := publishedStorage.Filelist(rootPath)
	if err != nil {
		return err
	}

	sort.Strings(existingFiles)

	filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles)

	for _, file := range filesToDelete {
		err = publishedStorage.Remove(filepath.Join(rootPath, file))
		if err != nil {
			return err
		}
	}

	return nil
}
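
The cleanup above boils down to a set difference over two sorted string slices (files actually present minus files still referenced), computed by utils.StrSlicesSubstract. The sketch below re-implements just that subtraction step under the same sortedness assumption; subtractSorted is a hypothetical helper, not aptly's utils function.

package main

import (
	"fmt"
	"sort"
)

// subtractSorted returns the elements of existing that are not in referenced.
// Both slices must already be sorted, which is why the cleanup code calls
// sort.Strings on each list before subtracting.
func subtractSorted(existing, referenced []string) []string {
	result := []string{}
	i, j := 0, 0
	for i < len(existing) {
		switch {
		case j >= len(referenced) || existing[i] < referenced[j]:
			result = append(result, existing[i]) // present but unreferenced: delete candidate
			i++
		case existing[i] == referenced[j]:
			i++ // still referenced, keep it
		default:
			j++
		}
	}
	return result
}

func main() {
	existing := []string{"main/a/aptly_0.9.dsc", "main/b/bash_4.3.deb", "main/o/old_1.0.deb"}
	referenced := []string{"main/a/aptly_0.9.dsc", "main/b/bash_4.3.deb"}
	sort.Strings(existing)
	sort.Strings(referenced)
	fmt.Println(subtractSorted(existing, referenced)) // [main/o/old_1.0.deb]
}
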
Code Example #3
// NewPackageListFromRefList loads a package list from a PackageRefList
func NewPackageListFromRefList(reflist *PackageRefList, collection *PackageCollection, progress aptly.Progress) (*PackageList, error) {
	// empty reflist
	if reflist == nil {
		return NewPackageList(), nil
	}

	result := &PackageList{packages: make(map[string]*Package, reflist.Len())}

	if progress != nil {
		progress.InitBar(int64(reflist.Len()), false)
	}

	err := reflist.ForEach(func(key []byte) error {
		p, err2 := collection.ByKey(key)
		if err2 != nil {
			return fmt.Errorf("unable to load package with key %s: %s", key, err2)
		}
		if progress != nil {
			progress.AddBar(1)
		}
		return result.Add(p)
	})

	if progress != nil {
		progress.ShutdownBar()
	}

	if err != nil {
		return nil, err
	}

	return result, nil
}
Code Example #4
File: package_pool.go Project: pombredanne/aptly
// FilepathList returns file paths of all the files in the pool
func (pool *PackagePool) FilepathList(progress aptly.Progress) ([]string, error) {
	pool.Lock()
	defer pool.Unlock()

	dirs, err := ioutil.ReadDir(pool.rootPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}

	if len(dirs) == 0 {
		return nil, nil
	}

	if progress != nil {
		progress.InitBar(int64(len(dirs)), false)
		defer progress.ShutdownBar()
	}

	result := []string{}

	for _, dir := range dirs {
		err = filepath.Walk(filepath.Join(pool.rootPath, dir.Name()), func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			if !info.IsDir() {
				result = append(result, path[len(pool.rootPath)+1:])
			}
			return nil
		})
		if err != nil {
			return nil, err
		}

		if progress != nil {
			progress.AddBar(1)
		}
	}

	return result, nil
}
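
The walk above builds pool-relative paths by slicing off len(pool.rootPath)+1 from each absolute path. The self-contained sketch below does the equivalent with filepath.Rel and without the progress bar or locking; filepathList is a hypothetical stand-in, not the PackagePool method.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// filepathList walks everything under rootPath and returns file paths relative
// to rootPath, treating a missing directory as an empty pool, as the original does.
func filepathList(rootPath string) ([]string, error) {
	result := []string{}
	err := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			rel, relErr := filepath.Rel(rootPath, path)
			if relErr != nil {
				return relErr
			}
			result = append(result, rel)
		}
		return nil
	})
	if os.IsNotExist(err) {
		return nil, nil
	}
	return result, err
}

func main() {
	pool, _ := os.MkdirTemp("", "pool")
	defer os.RemoveAll(pool)
	_ = os.MkdirAll(filepath.Join(pool, "ab"), 0755)
	_ = os.WriteFile(filepath.Join(pool, "ab", "aptly_0.9_amd64.deb"), nil, 0644)

	files, err := filepathList(pool)
	fmt.Println(files, err) // [ab/aptly_0.9_amd64.deb] <nil>
}
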
Code Example #5
File: index_files.go Project: liftup/aptly
func (files *indexFiles) FinalizeAll(progress aptly.Progress) (err error) {
	if progress != nil {
		progress.InitBar(int64(len(files.indexes)), false)
		defer progress.ShutdownBar()
	}

	for _, file := range files.indexes {
		err = file.Finalize(nil)
		if err != nil {
			return
		}
		if progress != nil {
			progress.AddBar(1)
		}
	}

	files.indexes = make(map[string]*indexFile)

	return
}
Code Example #6
File: remote.go Project: taku-k/aptly
// DownloadPackageIndexes downloads & parses package index files
func (repo *RemoteRepo) DownloadPackageIndexes(progress aptly.Progress, d aptly.Downloader, collectionFactory *CollectionFactory,
	ignoreMismatch bool) error {
	if repo.packageList != nil {
		panic("packageList != nil")
	}
	repo.packageList = NewPackageList()

	// Download and parse all Packages & Source files
	packagesURLs := [][]string{}

	if repo.IsFlat() {
		packagesURLs = append(packagesURLs, []string{repo.FlatBinaryURL().String(), "binary"})
		if repo.DownloadSources {
			packagesURLs = append(packagesURLs, []string{repo.FlatSourcesURL().String(), "source"})
		}
	} else {
		for _, component := range repo.Components {
			for _, architecture := range repo.Architectures {
				packagesURLs = append(packagesURLs, []string{repo.BinaryURL(component, architecture).String(), "binary"})
				if repo.DownloadUdebs {
					packagesURLs = append(packagesURLs, []string{repo.UdebURL(component, architecture).String(), "udeb"})
				}
			}
			if repo.DownloadSources {
				packagesURLs = append(packagesURLs, []string{repo.SourcesURL(component).String(), "source"})
			}
		}
	}

	for _, info := range packagesURLs {
		url, kind := info[0], info[1]
		packagesReader, packagesFile, err := http.DownloadTryCompression(d, url, repo.ReleaseFiles, ignoreMismatch)
		if err != nil {
			return err
		}
		defer packagesFile.Close()

		stat, _ := packagesFile.Stat()
		progress.InitBar(stat.Size(), true)

		sreader := NewControlFileReader(packagesReader)

		for {
			stanza, err := sreader.ReadStanza()
			if err != nil {
				return err
			}
			if stanza == nil {
				break
			}

			off, _ := packagesFile.Seek(0, 1)
			progress.SetBar(int(off))

			var p *Package

			if kind == "binary" {
				p = NewPackageFromControlFile(stanza)
			} else if kind == "udeb" {
				p = NewUdebPackageFromControlFile(stanza)
			} else if kind == "source" {
				p, err = NewSourcePackageFromControlFile(stanza)
				if err != nil {
					return err
				}
			}
			err = repo.packageList.Add(p)
			if err != nil {
				if _, ok := err.(*PackageConflictError); ok {
					progress.ColoredPrintf("@y[!]@| @!skipping package %s: duplicate in packages index@|", p)
				} else {
					return err
				}
			}

			err = collectionFactory.PackageCollection().Update(p)
			if err != nil {
				return err
			}
		}

		progress.ShutdownBar()
	}

	return nil
}
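
Progress in this example is driven by the position of the underlying index file rather than by the decompressed stream: Seek(0, 1), i.e. io.SeekCurrent, reports how many bytes have been consumed from disk without moving the offset. Here is a small standalone sketch of that offset-reporting trick, using a plain file and bufio instead of aptly's downloader and ControlFileReader.

package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "Packages")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Write a fake Packages index so there is something to read back.
	for i := 0; i < 1000; i++ {
		fmt.Fprintf(f, "Package: demo%d\n\n", i)
	}

	total, _ := f.Seek(0, io.SeekCurrent) // position after writing equals the file size
	f.Seek(0, io.SeekStart)

	reader := bufio.NewReader(f)
	for {
		_, err := reader.ReadString('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Seek(0, io.SeekCurrent) does not move the offset; it only reports how many
		// bytes have been pulled from the file so far, which is what the original
		// feeds into progress.SetBar().
		off, _ := f.Seek(0, io.SeekCurrent)
		fmt.Printf("\rread %d / %d bytes", off, total)
	}
	fmt.Println()
}

Because the reader in front of the file buffers (or decompresses) ahead, the reported offset can run slightly ahead of what has been parsed, which is an accepted trade-off in the original as well.
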
Code Example #7
File: publish.go Project: pombredanne/aptly
// CleanupPrefixComponentFiles removes all unreferenced files in published storage under prefix/component pair
func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix string, components []string,
	publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {

	var err error
	referencedFiles := map[string][]string{}

	if progress != nil {
		progress.Printf("Cleaning up prefix %#v components %s...\n", prefix, strings.Join(components, ", "))
	}

	for _, r := range collection.list {
		if r.Prefix == prefix {
			matches := false

			repoComponents := r.Components()

			for _, component := range components {
				if utils.StrSliceHasItem(repoComponents, component) {
					matches = true
					break
				}
			}

			if !matches {
				continue
			}

			err = collection.LoadComplete(r, collectionFactory)
			if err != nil {
				return err
			}

			for _, component := range components {
				if utils.StrSliceHasItem(repoComponents, component) {
					packageList, err := NewPackageListFromRefList(r.RefList(component), collectionFactory.PackageCollection(), progress)
					if err != nil {
						return err
					}

					packageList.ForEach(func(p *Package) error {
						poolDir, err := p.PoolDirectory()
						if err != nil {
							return err
						}

						for _, f := range p.Files() {
							referencedFiles[component] = append(referencedFiles[component], filepath.Join(poolDir, f.Filename))
						}

						return nil
					})
				}
			}
		}
	}

	for _, component := range components {
		sort.Strings(referencedFiles[component])

		rootPath := filepath.Join(prefix, "pool", component)
		existingFiles, err := publishedStorage.Filelist(rootPath)
		if err != nil {
			return err
		}

		sort.Strings(existingFiles)

		filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles[component])

		for _, file := range filesToDelete {
			err = publishedStorage.Remove(filepath.Join(rootPath, file))
			if err != nil {
				return err
			}
		}
	}

	return nil
}
Code Example #8
File: publish.go Project: pombredanne/aptly
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {
	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)

	for component, list := range lists {
		hadUdebs := false

		// For all architectures, pregenerate packages/sources files
		for _, arch := range p.Architectures {
			indexes.PackageIndex(component, arch, false)
		}

		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		list.PrepareIndex()

		contentIndexes := map[string]*ContentsIndex{}

		err = list.ForEachIndexed(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}

			matches := false
			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					matches = true
					break
				}
			}

			if matches {
				hadUdebs = hadUdebs || pkg.IsUdeb
				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
				if err != nil {
					return err
				}
			}

			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					var bufWriter *bufio.Writer

					if !p.SkipContents {
						key := fmt.Sprintf("%s-%v", arch, pkg.IsUdeb)

						contentIndex := contentIndexes[key]

						if contentIndex == nil {
							contentIndex = NewContentsIndex()
							contentIndexes[key] = contentIndex
						}

						contentIndex.Push(pkg, packagePool)
					}

					bufWriter, err = indexes.PackageIndex(component, arch, pkg.IsUdeb).BufWriter()
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter, pkg.IsSource, false)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}
				}
			}

			pkg.files = nil
			pkg.deps = nil
			pkg.extra = nil
			pkg.contents = nil

			return nil
		})

		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		for _, arch := range p.Architectures {
			for _, udeb := range []bool{true, false} {
				index := contentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
				if index == nil || index.Empty() {
					continue
				}

				bufWriter, err := indexes.ContentsIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}

				_, err = index.WriteTo(bufWriter)
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}
			}
		}

		if progress != nil {
			progress.ShutdownBar()
		}

		udebs := []bool{false}
		if hadUdebs {
			udebs = append(udebs, true)

			// For all architectures, pregenerate .udeb indexes
			for _, arch := range p.Architectures {
				indexes.PackageIndex(component, arch, true)
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			for _, udeb := range udebs {
				release := make(Stanza)
				release["Archive"] = p.Distribution
				release["Architecture"] = arch
				release["Component"] = component
				release["Origin"] = p.GetOrigin()
				release["Label"] = p.GetLabel()

				var bufWriter *bufio.Writer
				bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to get ReleaseIndex writer: %s", err)
				}

				err = release.WriteTo(bufWriter, false, true)
				if err != nil {
					return fmt.Errorf("unable to create Release file: %s", err)
				}
			}
		}
	}

	if progress != nil {
		progress.Printf("Finalizing metadata files...\n")
	}

	err = indexes.FinalizeAll(progress)
	if err != nil {
		return err
	}

	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Suite"] = p.Distribution
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = ""
	release["SHA1"] = ""
	release["SHA256"] = ""
	release["SHA512"] = ""

	release["Components"] = strings.Join(p.Components(), " ")

	for path, info := range indexes.generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
		release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, info.Size, path)
	}

	releaseFile := indexes.ReleaseFile()
	bufWriter, err := releaseFile.BufWriter()
	if err != nil {
		return err
	}

	err = release.WriteTo(bufWriter, false, true)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	err = releaseFile.Finalize(signer)
	if err != nil {
		return err
	}

	err = indexes.RenameFiles()
	if err != nil {
		return err
	}

	return nil
}
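
The Release stanza near the end is mostly bookkeeping: each checksum field is a multi-line string of " <hash> <size> <path>" entries accumulated from the generated index files. Below is a toy sketch of just that formatting step; the checksumInfo struct only approximates the fields used from utils.ChecksumInfo.

package main

import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
)

// checksumInfo approximates the fields read from utils.ChecksumInfo above.
type checksumInfo struct {
	Size   int64
	MD5    string
	SHA256 string
}

func main() {
	data := []byte("Package: demo\n")
	generatedFiles := map[string]checksumInfo{
		"main/binary-amd64/Packages": {
			Size:   int64(len(data)),
			MD5:    fmt.Sprintf("%x", md5.Sum(data)),
			SHA256: fmt.Sprintf("%x", sha256.Sum256(data)),
		},
	}

	// Accumulate one " <hash> <size> <path>" line per file, exactly as the
	// Release stanza fields are built in the example above.
	release := map[string]string{"MD5Sum": "", "SHA256": ""}
	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	fmt.Print("MD5Sum:\n" + release["MD5Sum"])
	fmt.Print("SHA256:\n" + release["SHA256"])
}
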
Code Example #9
File: remote.go Project: hdonnay/aptly
// Download downloads all repo files
func (repo *RemoteRepo) Download(progress aptly.Progress, d aptly.Downloader, packageCollection *PackageCollection, packagePool aptly.PackagePool, ignoreMismatch bool) error {
	list := NewPackageList()

	progress.Printf("Downloading & parsing package files...\n")

	// Download and parse all Packages & Source files
	packagesURLs := [][]string{}

	if repo.IsFlat() {
		packagesURLs = append(packagesURLs, []string{repo.FlatBinaryURL().String(), "binary"})
		if repo.DownloadSources {
			packagesURLs = append(packagesURLs, []string{repo.FlatSourcesURL().String(), "source"})
		}
	} else {
		for _, component := range repo.Components {
			for _, architecture := range repo.Architectures {
				packagesURLs = append(packagesURLs, []string{repo.BinaryURL(component, architecture).String(), "binary"})
			}
			if repo.DownloadSources {
				packagesURLs = append(packagesURLs, []string{repo.SourcesURL(component).String(), "source"})
			}
		}
	}

	for _, info := range packagesURLs {
		url, kind := info[0], info[1]
		packagesReader, packagesFile, err := http.DownloadTryCompression(d, url, repo.ReleaseFiles, ignoreMismatch)
		if err != nil {
			return err
		}
		defer packagesFile.Close()

		sreader := NewControlFileReader(packagesReader)

		for {
			stanza, err := sreader.ReadStanza()
			if err != nil {
				return err
			}
			if stanza == nil {
				break
			}

			var p *Package

			if kind == "binary" {
				p = NewPackageFromControlFile(stanza)
			} else if kind == "source" {
				p, err = NewSourcePackageFromControlFile(stanza)
				if err != nil {
					return err
				}
			}
			list.Add(p)
		}
	}

	progress.Printf("Saving packages to database...\n")

	progress.InitBar(int64(list.Len()), false)

	packageCollection.db.StartBatch()
	count := 0

	// Save package meta information to DB
	err := list.ForEach(func(p *Package) error {
		progress.AddBar(1)
		count++
		if count > 1000 {
			count = 0
			err := packageCollection.db.FinishBatch()
			if err != nil {
				return err
			}
			packageCollection.db.StartBatch()
		}
		return packageCollection.Update(p)
	})
	if err != nil {
		return fmt.Errorf("unable to save packages to db: %s", err)
	}

	err = packageCollection.db.FinishBatch()
	if err != nil {
		return fmt.Errorf("unable to save packages to db: %s", err)
	}

	progress.ShutdownBar()

	progress.Printf("Building download queue...\n")

	// Build download queue
	queued := make(map[string]PackageDownloadTask, list.Len())
	count = 0
	downloadSize := int64(0)

	err = list.ForEach(func(p *Package) error {
		list, err := p.DownloadList(packagePool)
		if err != nil {
			return err
		}

		for _, task := range list {
			key := task.RepoURI + "-" + task.DestinationPath
			_, found := queued[key]
			if !found {
				count++
				downloadSize += task.Checksums.Size
				queued[key] = task
			}
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to build download queue: %s", err)
	}

	repo.packageRefs = NewPackageRefListFromPackageList(list)
	// free up package list, we don't need it after this point
	list = nil

	progress.Printf("Download queue: %d items, %.2f GiB size\n", count, float64(downloadSize)/(1024.0*1024.0*1024.0))

	progress.InitBar(downloadSize, true)

	// Download all package files
	ch := make(chan error, len(queued))

	for _, task := range queued {
		d.DownloadWithChecksum(repo.PackageURL(task.RepoURI).String(), task.DestinationPath, ch, task.Checksums, ignoreMismatch)
	}

	// We don't need queued after this point
	queued = nil

	// Wait for all downloads to finish
	errors := make([]string, 0)

	for count > 0 {
		err = <-ch
		if err != nil {
			errors = append(errors, err.Error())
		}
		count--
	}

	progress.ShutdownBar()

	if len(errors) > 0 {
		return fmt.Errorf("download errors:\n  %s\n", strings.Join(errors, "\n  "))
	}

	repo.LastDownloadDate = time.Now()

	return nil
}
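
The download stage fans tasks out through an error channel buffered to the queue size, then drains exactly count results and aggregates any failures into a single error. Here is a stripped-down sketch of that concurrency pattern, with goroutines standing in for Downloader.DownloadWithChecksum.

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	tasks := []string{"pkg-a.deb", "pkg-b.deb", "pkg-c.deb"}

	// Buffered channel sized to the number of tasks, as in the original,
	// so no sender ever blocks even if the receiver is slow.
	ch := make(chan error, len(tasks))
	count := len(tasks)

	for _, t := range tasks {
		go func(name string) {
			time.Sleep(10 * time.Millisecond) // simulate a download
			if name == "pkg-b.deb" {
				ch <- fmt.Errorf("checksum mismatch for %s", name)
				return
			}
			ch <- nil
		}(t)
	}

	// Drain exactly count results, collecting error strings.
	errors := make([]string, 0)
	for count > 0 {
		if err := <-ch; err != nil {
			errors = append(errors, err.Error())
		}
		count--
	}

	if len(errors) > 0 {
		fmt.Printf("download errors:\n  %s\n", strings.Join(errors, "\n  "))
	} else {
		fmt.Println("all downloads finished")
	}
}
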
Code Example #10
File: publish.go Project: ryanuber/aptly
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorage aptly.PublishedStorage,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress) error {
	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	// Load all packages
	list, err := NewPackageListFromRefList(p.RefList(), collectionFactory.PackageCollection(), progress)
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	if list.Len() == 0 {
		return fmt.Errorf("source is empty")
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			p.Architectures = list.Architectures(true)
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	generatedFiles := map[string]utils.ChecksumInfo{}
	renameMap := map[string]string{}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	// For all architectures, generate release file
	for _, arch := range p.Architectures {
		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		var relativePath string
		if arch == "source" {
			relativePath = filepath.Join(p.Component, "source", "Sources")
		} else {
			relativePath = filepath.Join(p.Component, fmt.Sprintf("binary-%s", arch), "Packages")
		}
		err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
		if err != nil {
			return err
		}

		var packagesFile *os.File
		packagesFile, err = publishedStorage.CreateFile(filepath.Join(basePath, relativePath+suffix))
		if err != nil {
			return fmt.Errorf("unable to creates Packages file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
		}

		bufWriter := bufio.NewWriter(packagesFile)

		err = list.ForEach(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}
			if pkg.MatchesArchitecture(arch) {
				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, p.Component)
				if err != nil {
					return err
				}

				err = pkg.Stanza().WriteTo(bufWriter)
				if err != nil {
					return err
				}
				err = bufWriter.WriteByte('\n')
				if err != nil {
					return err
				}

				pkg.files = nil
				pkg.deps = nil
				pkg.extra = nil

			}

			return nil
		})

		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		err = bufWriter.Flush()
		if err != nil {
			return fmt.Errorf("unable to write Packages file: %s", err)
		}

		err = utils.CompressFile(packagesFile)
		if err != nil {
			return fmt.Errorf("unable to compress Packages files: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, relativePath+suffix+".gz")] = filepath.Join(basePath, relativePath+".gz")
			renameMap[filepath.Join(basePath, relativePath+suffix+".bz2")] = filepath.Join(basePath, relativePath+".bz2")
		}

		packagesFile.Close()

		var checksumInfo utils.ChecksumInfo
		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".gz"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".gz"] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".bz2"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".bz2"] = checksumInfo

		if progress != nil {
			progress.ShutdownBar()
		}
	}

	release := make(Stanza)
	if p.Origin == "" {
		release["Origin"] = p.Prefix + " " + p.Distribution
	} else {
		release["Origin"] = p.Origin
	}
	if p.Label == "" {
		release["Label"] = p.Prefix + " " + p.Distribution
	} else {
		release["Label"] = p.Label
	}
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Components"] = p.Component
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"

	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	releaseFile, err := publishedStorage.CreateFile(filepath.Join(basePath, "Release"+suffix))
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	if suffix != "" {
		renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
	}

	bufWriter := bufio.NewWriter(releaseFile)

	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFilename := releaseFile.Name()
	releaseFile.Close()

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
			renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
		}

	}

	for oldName, newName := range renameMap {
		err = publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}
Code Example #11
// Download downloads all repo files
func (repo *RemoteRepo) Download(progress aptly.Progress, d aptly.Downloader, collectionFactory *CollectionFactory,
	packagePool aptly.PackagePool, ignoreMismatch bool, dependencyOptions int, filterQuery PackageQuery) error {
	list := NewPackageList()

	progress.Printf("Downloading & parsing package files...\n")

	// Download and parse all Packages & Source files
	packagesURLs := [][]string{}

	if repo.IsFlat() {
		packagesURLs = append(packagesURLs, []string{repo.FlatBinaryURL().String(), "binary"})
		if repo.DownloadSources {
			packagesURLs = append(packagesURLs, []string{repo.FlatSourcesURL().String(), "source"})
		}
	} else {
		for _, component := range repo.Components {
			for _, architecture := range repo.Architectures {
				packagesURLs = append(packagesURLs, []string{repo.BinaryURL(component, architecture).String(), "binary"})
			}
			if repo.DownloadSources {
				packagesURLs = append(packagesURLs, []string{repo.SourcesURL(component).String(), "source"})
			}
		}
	}

	for _, info := range packagesURLs {
		url, kind := info[0], info[1]
		packagesReader, packagesFile, err := http.DownloadTryCompression(d, url, repo.ReleaseFiles, ignoreMismatch)
		if err != nil {
			return err
		}
		defer packagesFile.Close()

		stat, _ := packagesFile.Stat()
		progress.InitBar(stat.Size(), true)

		sreader := NewControlFileReader(packagesReader)

		for {
			stanza, err := sreader.ReadStanza()
			if err != nil {
				return err
			}
			if stanza == nil {
				break
			}

			off, _ := packagesFile.Seek(0, 1)
			progress.SetBar(int(off))

			var p *Package

			if kind == "binary" {
				p = NewPackageFromControlFile(stanza)
			} else if kind == "source" {
				p, err = NewSourcePackageFromControlFile(stanza)
				if err != nil {
					return err
				}
			}
			err = list.Add(p)
			if err != nil {
				return err
			}

			err = collectionFactory.PackageCollection().Update(p)
			if err != nil {
				return err
			}
		}

		progress.ShutdownBar()
	}

	var err error

	if repo.Filter != "" {
		progress.Printf("Applying filter...\n")

		list.PrepareIndex()

		emptyList := NewPackageList()
		emptyList.PrepareIndex()

		origPackages := list.Len()
		list, err = list.Filter([]PackageQuery{filterQuery}, repo.FilterWithDeps, emptyList, dependencyOptions, repo.Architectures)
		if err != nil {
			return err
		}

		progress.Printf("Packages filtered: %d -> %d.\n", origPackages, list.Len())
	}

	progress.Printf("Building download queue...\n")

	// Build download queue
	queued := make(map[string]PackageDownloadTask, list.Len())
	count := 0
	downloadSize := int64(0)

	err = list.ForEach(func(p *Package) error {
		list, err2 := p.DownloadList(packagePool)
		if err2 != nil {
			return err2
		}
		p.files = nil

		for _, task := range list {
			key := task.RepoURI + "-" + task.DestinationPath
			_, found := queued[key]
			if !found {
				count++
				downloadSize += task.Checksums.Size
				queued[key] = task
			}
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to build download queue: %s", err)
	}

	repo.packageRefs = NewPackageRefListFromPackageList(list)
	// free up package list, we don't need it after this point
	list = nil

	progress.Printf("Download queue: %d items (%s)\n", count, utils.HumanBytes(downloadSize))

	progress.InitBar(downloadSize, true)

	// Download all package files
	ch := make(chan error, len(queued))

	for _, task := range queued {
		d.DownloadWithChecksum(repo.PackageURL(task.RepoURI).String(), task.DestinationPath, ch, task.Checksums, ignoreMismatch)
	}

	// We don't need queued after this point
	queued = nil

	// Wait for all downloads to finish
	errors := make([]string, 0)

	for count > 0 {
		err = <-ch
		if err != nil {
			errors = append(errors, err.Error())
		}
		count--
	}

	progress.ShutdownBar()

	if len(errors) > 0 {
		return fmt.Errorf("download errors:\n  %s\n", strings.Join(errors, "\n  "))
	}

	repo.LastDownloadDate = time.Now()

	return nil
}
Code Example #12
// VerifyDependencies looks for missing dependencies in the package list.
//
// Analysis is performed for each architecture, in the specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
	missing := make([]Dependency, 0, 128)

	if progress != nil {
		progress.InitBar(int64(l.Len())*int64(len(architectures)), false)
	}

	for _, arch := range architectures {
		cache := make(map[string]bool, 2048)

		for _, p := range l.packages {
			if progress != nil {
				progress.AddBar(1)
			}

			if !p.MatchesArchitecture(arch) {
				continue
			}

			for _, dep := range p.GetDependencies(options) {
				variants, err := ParseDependencyVariants(dep)
				if err != nil {
					return nil, fmt.Errorf("unable to process package %s: %s", p, err)
				}

				variants = depSliceDeduplicate(variants)

				variantsMissing := make([]Dependency, 0, len(variants))
				missingCount := 0

				for _, dep := range variants {
					if dep.Architecture == "" {
						dep.Architecture = arch
					}

					hash := dep.Hash()
					r, ok := cache[hash]
					if ok {
						if !r {
							missingCount++
						}
						continue
					}

					if sources.Search(dep, false) == nil {
						variantsMissing = append(variantsMissing, dep)
						missingCount++
					} else {
						cache[hash] = true
					}
				}

				if options&DepFollowAllVariants == DepFollowAllVariants {
					missing = append(missing, variantsMissing...)
					for _, dep := range variantsMissing {
						cache[dep.Hash()] = false
					}
				} else {
					if missingCount == len(variants) {
						missing = append(missing, variantsMissing...)
						for _, dep := range variantsMissing {
							cache[dep.Hash()] = false
						}
					}
				}
			}
		}
	}

	if progress != nil {
		progress.ShutdownBar()
	}

	return missing, nil
}
Code Example #13
File: list.go Project: liftup/aptly
// VerifyDependencies looks for missing dependencies in the package list.
//
// Analysis is performed for each architecture, in the specified sources
func (l *PackageList) VerifyDependencies(options int, architectures []string, sources *PackageList, progress aptly.Progress) ([]Dependency, error) {
	l.PrepareIndex()
	missing := make([]Dependency, 0, 128)

	if progress != nil {
		progress.InitBar(int64(l.Len())*int64(len(architectures)), false)
	}

	for _, arch := range architectures {
		cache := make(map[string]bool, 2048)

		for _, p := range l.packagesIndex {
			if progress != nil {
				progress.AddBar(1)
			}

			if !p.MatchesArchitecture(arch) {
				continue
			}

			for _, dep := range p.GetDependencies(options) {
				variants, err := ParseDependencyVariants(dep)
				if err != nil {
					return nil, fmt.Errorf("unable to process package %s: %s", p, err)
				}

				variants = depSliceDeduplicate(variants)

				variantsMissing := make([]Dependency, 0, len(variants))

				for _, dep := range variants {
					if dep.Architecture == "" {
						dep.Architecture = arch
					}

					hash := dep.Hash()
					satisfied, ok := cache[hash]
					if !ok {
						satisfied = sources.Search(dep, false) != nil
						cache[hash] = satisfied
					}

					if !satisfied && !ok {
						variantsMissing = append(variantsMissing, dep)
					}

					if satisfied && options&DepFollowAllVariants == 0 {
						variantsMissing = nil
						break
					}
				}

				missing = append(missing, variantsMissing...)
			}
		}
	}

	if progress != nil {
		progress.ShutdownBar()
	}

	return missing, nil
}
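
The subtle part of both VerifyDependencies variants is how alternatives are handled: by default a dependency such as "postfix | mail-transport-agent" counts as satisfied when any alternative resolves, while DepFollowAllVariants reports every unresolved alternative individually. Below is a toy sketch of just that decision, with a plain map standing in for sources.Search; the names are hypothetical, not aptly's API.

package main

import "fmt"

const depFollowAllVariants = 1 // stand-in for the DepFollowAllVariants option bit

// missingVariants mimics the per-dependency loop: by default, one satisfied
// alternative clears the whole dependency; with depFollowAllVariants set,
// every unsatisfied alternative is reported.
func missingVariants(variants []string, available map[string]bool, options int) []string {
	missing := []string{}
	for _, v := range variants {
		if available[v] {
			if options&depFollowAllVariants == 0 {
				return nil // any satisfied alternative is enough
			}
			continue
		}
		missing = append(missing, v)
	}
	return missing
}

func main() {
	available := map[string]bool{"mail-transport-agent": true}
	variants := []string{"postfix", "mail-transport-agent"}

	fmt.Println(missingVariants(variants, available, 0))                    // []
	fmt.Println(missingVariants(variants, available, depFollowAllVariants)) // [postfix]
}
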