// Remove removes a published repository, cleaning up directories and files
func (collection *PublishedRepoCollection) Remove(publishedStorageProvider aptly.PublishedStorageProvider,
	storage, prefix, distribution string, collectionFactory *CollectionFactory, progress aptly.Progress) error {

	repo, err := collection.ByStoragePrefixDistribution(storage, prefix, distribution)
	if err != nil {
		return err
	}

	// The whole prefix can be removed only if no other published repo shares this
	// storage+prefix; components shared with other repos are cleaned up by reference
	// counting instead of being deleted outright.
	removePrefix := true
	removePoolComponents := repo.Components()
	cleanComponents := []string{}
	repoPosition := -1

	for i, r := range collection.list {
		if r == repo {
			repoPosition = i
			continue
		}
		if r.Storage == repo.Storage && r.Prefix == repo.Prefix {
			removePrefix = false

			rComponents := r.Components()
			for _, component := range rComponents {
				if utils.StrSliceHasItem(removePoolComponents, component) {
					removePoolComponents = utils.StrSlicesSubstract(removePoolComponents, []string{component})
					cleanComponents = append(cleanComponents, component)
				}
			}
		}
	}

	err = repo.RemoveFiles(publishedStorageProvider, removePrefix, removePoolComponents, progress)
	if err != nil {
		return err
	}

	// Delete repo from the in-memory list without preserving order
	collection.list[len(collection.list)-1], collection.list[repoPosition], collection.list =
		nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1]

	if len(cleanComponents) > 0 {
		err = collection.CleanupPrefixComponentFiles(repo.Prefix, cleanComponents,
			publishedStorageProvider.GetPublishedStorage(storage), collectionFactory, progress)
		if err != nil {
			return err
		}
	}

	err = collection.db.Delete(repo.Key())
	if err != nil {
		return err
	}

	for _, component := range repo.Components() {
		err = collection.db.Delete(repo.RefKey(component))
		if err != nil {
			return err
		}
	}

	return nil
}
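// Aside (illustrative, not from the aptly source): the three-way tuple assignment in
// Remove above is the standard Go idiom for O(1) deletion from a slice of pointers
// when element order does not matter. A minimal runnable sketch of the same technique:

package main

import "fmt"

func main() {
	a, b, c, d := "a", "b", "c", "d"
	list := []*string{&a, &b, &c, &d}
	i := 1 // position to delete

	// Nil out the last slot (so the garbage collector can reclaim the element),
	// move the old last element into slot i, then shrink the slice by one.
	list[len(list)-1], list[i], list = nil, list[len(list)-1], list[:len(list)-1]

	for _, s := range list {
		fmt.Println(*s) // prints a, d, c — order is not preserved
	}
}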
// CleanupPrefixComponentFiles removes all unreferenced files in published storage under prefix/component pair
func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix, component string,
	publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {

	var err error
	referencedFiles := []string{}

	if progress != nil {
		progress.Printf("Cleaning up prefix %#v component %#v...\n", prefix, component)
	}

	for _, r := range collection.list {
		if r.Prefix == prefix && r.Component == component {
			err = collection.LoadComplete(r, collectionFactory)
			if err != nil {
				return err
			}

			packageList, err := NewPackageListFromRefList(r.RefList(), collectionFactory.PackageCollection(), progress)
			if err != nil {
				return err
			}

			err = packageList.ForEach(func(p *Package) error {
				poolDir, err := p.PoolDirectory()
				if err != nil {
					return err
				}

				for _, f := range p.Files() {
					referencedFiles = append(referencedFiles, filepath.Join(poolDir, f.Filename))
				}

				return nil
			})
			if err != nil {
				return err
			}
		}
	}

	sort.Strings(referencedFiles)

	rootPath := filepath.Join(prefix, "pool", component)
	existingFiles, err := publishedStorage.Filelist(rootPath)
	if err != nil {
		return err
	}

	sort.Strings(existingFiles)

	filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles)

	for _, file := range filesToDelete {
		err = publishedStorage.Remove(filepath.Join(rootPath, file))
		if err != nil {
			return err
		}
	}

	return nil
}
// Fetch updates information about repository
func (repo *RemoteRepo) Fetch(d aptly.Downloader, verifier utils.Verifier) error {
	var (
		release, inrelease, releasesig *os.File
		err                            error
	)

	if verifier == nil {
		// 0. Just download the Release file to a temporary file
		release, err = http.DownloadTemp(d, repo.ReleaseURL("Release").String())
		if err != nil {
			return err
		}
	} else {
		// 1. try InRelease file
		inrelease, err = http.DownloadTemp(d, repo.ReleaseURL("InRelease").String())
		if err != nil {
			goto splitsignature
		}
		defer inrelease.Close()

		_, err = verifier.VerifyClearsigned(inrelease, true)
		if err != nil {
			goto splitsignature
		}

		inrelease.Seek(0, 0)

		release, err = verifier.ExtractClearsigned(inrelease)
		if err != nil {
			goto splitsignature
		}
		goto ok

	splitsignature:
		// 2. try Release + Release.gpg
		release, err = http.DownloadTemp(d, repo.ReleaseURL("Release").String())
		if err != nil {
			return err
		}

		releasesig, err = http.DownloadTemp(d, repo.ReleaseURL("Release.gpg").String())
		if err != nil {
			return err
		}

		err = verifier.VerifyDetachedSignature(releasesig, release)
		if err != nil {
			return err
		}

		_, err = release.Seek(0, 0)
		if err != nil {
			return err
		}
	}
ok:
	defer release.Close()

	sreader := NewControlFileReader(release)
	stanza, err := sreader.ReadStanza()
	if err != nil {
		return err
	}

	if !repo.IsFlat() {
		architectures := strings.Split(stanza["Architectures"], " ")
		sort.Strings(architectures)
		// the "source" architecture is never present, despite what the Release file claims
		architectures = utils.StrSlicesSubstract(architectures, []string{"source"})
		if len(repo.Architectures) == 0 {
			repo.Architectures = architectures
		} else {
			err = utils.StringsIsSubset(repo.Architectures, architectures,
				fmt.Sprintf("architecture %%s not available in repo %s", repo))
			if err != nil {
				return err
			}
		}

		components := strings.Split(stanza["Components"], " ")
		if strings.Contains(repo.Distribution, "/") {
			distributionLast := path.Base(repo.Distribution) + "/"
			for i := range components {
				if strings.HasPrefix(components[i], distributionLast) {
					components[i] = components[i][len(distributionLast):]
				}
			}
		}
		if len(repo.Components) == 0 {
			repo.Components = components
		} else if !repo.SkipComponentCheck {
			err = utils.StringsIsSubset(repo.Components, components,
				fmt.Sprintf("component %%s not available in repo %s, use -force-components to override", repo))
			if err != nil {
				return err
			}
		}
	}

	repo.ReleaseFiles = make(map[string]utils.ChecksumInfo)

	parseSums := func(field string, setter func(sum *utils.ChecksumInfo, data string)) error {
		for _, line := range strings.Split(stanza[field], "\n") {
			line = strings.TrimSpace(line)
			if line == "" {
				continue
			}

			parts := strings.Fields(line)

			if len(parts) != 3 {
				return fmt.Errorf("unparseable hash sum line: %#v", line)
			}

			var size int64
			size, err = strconv.ParseInt(parts[1], 10, 64)
			if err != nil {
				return fmt.Errorf("unable to parse size: %s", err)
			}

			sum := repo.ReleaseFiles[parts[2]]
			sum.Size = size
			setter(&sum, parts[0])

			repo.ReleaseFiles[parts[2]] = sum
		}

		delete(stanza, field)

		return nil
	}

	err = parseSums("MD5Sum", func(sum *utils.ChecksumInfo, data string) { sum.MD5 = data })
	if err != nil {
		return err
	}

	err = parseSums("SHA1", func(sum *utils.ChecksumInfo, data string) { sum.SHA1 = data })
	if err != nil {
		return err
	}

	err = parseSums("SHA256", func(sum *utils.ChecksumInfo, data string) { sum.SHA256 = data })
	if err != nil {
		return err
	}

	delete(stanza, "SHA512")

	repo.Meta = stanza

	return nil
}
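// Aside (illustrative, self-contained): parseSums above walks a Release hash field
// where every line has the form "<checksum> <size> <filename>". A minimal runnable
// version of just that parsing step, with made-up input data:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	field := `
 0123456789abcdef0123456789abcdef  1234 main/binary-amd64/Packages
 fedcba9876543210fedcba9876543210   567 main/binary-amd64/Packages.gz`

	for _, line := range strings.Split(field, "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		parts := strings.Fields(line)
		if len(parts) != 3 {
			fmt.Printf("unparseable hash sum line: %#v\n", line)
			continue
		}

		size, err := strconv.ParseInt(parts[1], 10, 64)
		if err != nil {
			fmt.Printf("unable to parse size: %s\n", err)
			continue
		}

		fmt.Printf("file=%s size=%d sum=%s\n", parts[2], size, parts[0])
	}
}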
// CleanupPrefixComponentFiles removes all unreferenced files in published storage under prefix/component pairs
func (collection *PublishedRepoCollection) CleanupPrefixComponentFiles(prefix string, components []string,
	publishedStorage aptly.PublishedStorage, collectionFactory *CollectionFactory, progress aptly.Progress) error {

	var err error
	referencedFiles := map[string][]string{}

	if progress != nil {
		progress.Printf("Cleaning up prefix %#v components %s...\n", prefix, strings.Join(components, ", "))
	}

	for _, r := range collection.list {
		if r.Prefix == prefix {
			matches := false

			repoComponents := r.Components()

			for _, component := range components {
				if utils.StrSliceHasItem(repoComponents, component) {
					matches = true
					break
				}
			}

			if !matches {
				continue
			}

			err = collection.LoadComplete(r, collectionFactory)
			if err != nil {
				return err
			}

			for _, component := range components {
				if utils.StrSliceHasItem(repoComponents, component) {
					packageList, err := NewPackageListFromRefList(r.RefList(component), collectionFactory.PackageCollection(), progress)
					if err != nil {
						return err
					}

					err = packageList.ForEach(func(p *Package) error {
						poolDir, err := p.PoolDirectory()
						if err != nil {
							return err
						}

						for _, f := range p.Files() {
							referencedFiles[component] = append(referencedFiles[component], filepath.Join(poolDir, f.Filename))
						}

						return nil
					})
					if err != nil {
						return err
					}
				}
			}
		}
	}

	for _, component := range components {
		sort.Strings(referencedFiles[component])

		rootPath := filepath.Join(prefix, "pool", component)
		existingFiles, err := publishedStorage.Filelist(rootPath)
		if err != nil {
			return err
		}

		sort.Strings(existingFiles)

		filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles[component])

		for _, file := range filesToDelete {
			err = publishedStorage.Remove(filepath.Join(rootPath, file))
			if err != nil {
				return err
			}
		}
	}

	return nil
}
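// Aside (illustrative): both cleanup variants reduce to subtracting two sorted string
// slices — files present in storage minus files referenced by packages. A runnable
// stand-in for utils.StrSlicesSubstract (the real implementation may differ); both
// inputs must be sorted, as the callers above guarantee:

package main

import (
	"fmt"
	"sort"
)

// subtractSorted returns the elements of l that do not appear in r, in linear time.
func subtractSorted(l, r []string) []string {
	var result []string
	i := 0
	for _, s := range l {
		for i < len(r) && r[i] < s {
			i++
		}
		if i == len(r) || r[i] != s {
			result = append(result, s)
		}
	}
	return result
}

func main() {
	existing := []string{"a/abc_1.deb", "a/abc_2.deb", "x/xyz_1.deb"} // files in pool
	referenced := []string{"a/abc_2.deb"}                             // files still used

	sort.Strings(existing)
	sort.Strings(referenced)

	fmt.Println(subtractSorted(existing, referenced)) // [a/abc_1.deb x/xyz_1.deb] — safe to delete
}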
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {

	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)

	for component, list := range lists {
		hadUdebs := false

		// For all architectures, pregenerate packages/sources files
		for _, arch := range p.Architectures {
			indexes.PackageIndex(component, arch, false)
		}

		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		list.PrepareIndex()

		contentIndexes := map[string]*ContentsIndex{}

		err = list.ForEachIndexed(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}

			matches := false
			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					matches = true
					break
				}
			}

			if matches {
				hadUdebs = hadUdebs || pkg.IsUdeb

				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
				if err != nil {
					return err
				}
			}

			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					var bufWriter *bufio.Writer

					if !p.SkipContents {
						key := fmt.Sprintf("%s-%v", arch, pkg.IsUdeb)

						contentIndex := contentIndexes[key]

						if contentIndex == nil {
							contentIndex = NewContentsIndex()
							contentIndexes[key] = contentIndex
						}

						contentIndex.Push(pkg, packagePool)
					}

					bufWriter, err = indexes.PackageIndex(component, arch, pkg.IsUdeb).BufWriter()
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter, pkg.IsSource, false)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}
				}
			}

			pkg.files = nil
			pkg.deps = nil
			pkg.extra = nil
			pkg.contents = nil

			return nil
		})

		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		for _, arch := range p.Architectures {
			for _, udeb := range []bool{true, false} {
				index := contentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
				if index == nil || index.Empty() {
					continue
				}

				bufWriter, err := indexes.ContentsIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}

				_, err = index.WriteTo(bufWriter)
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}
			}
		}

		if progress != nil {
			progress.ShutdownBar()
		}

		udebs := []bool{false}
		if hadUdebs {
			udebs = append(udebs, true)

			// For all architectures, pregenerate .udeb indexes
			for _, arch := range p.Architectures {
				indexes.PackageIndex(component, arch, true)
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			for _, udeb := range udebs {
				release := make(Stanza)
				release["Archive"] = p.Distribution
				release["Architecture"] = arch
				release["Component"] = component
				release["Origin"] = p.GetOrigin()
				release["Label"] = p.GetLabel()

				var bufWriter *bufio.Writer
				bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to get ReleaseIndex writer: %s", err)
				}

				err = release.WriteTo(bufWriter, false, true)
				if err != nil {
					return fmt.Errorf("unable to create Release file: %s", err)
				}
			}
		}
	}

	if progress != nil {
		progress.Printf("Finalizing metadata files...\n")
	}
	err = indexes.FinalizeAll(progress)
	if err != nil {
		return err
	}

	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Suite"] = p.Distribution
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = ""
	release["SHA1"] = ""
	release["SHA256"] = ""
	release["SHA512"] = ""
	release["Components"] = strings.Join(p.Components(), " ")

	for path, info := range indexes.generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
		release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, info.Size, path)
	}

	releaseFile := indexes.ReleaseFile()
	bufWriter, err := releaseFile.BufWriter()
	if err != nil {
		return err
	}

	err = release.WriteTo(bufWriter, false, true)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	err = releaseFile.Finalize(signer)
	if err != nil {
		return err
	}

	err = indexes.RenameFiles()
	if err != nil {
		return err
	}

	return nil
}
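// Aside (illustrative): the Date field above relies on Go's reference-time layout —
// the literal "Mon, 2 Jan 2006 15:04:05 MST" is not a sample value but the format
// pattern itself, spelled with Go's magic reference date. Runnable check:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Calling UTC() first makes the zone abbreviation render as "UTC".
	fmt.Println(time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST"))
	// e.g. Sat, 4 May 2024 12:34:56 UTC
}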
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorage aptly.PublishedStorage,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress) error {

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	// Load all packages
	list, err := NewPackageListFromRefList(p.RefList(), collectionFactory.PackageCollection(), progress)
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	if list.Len() == 0 {
		return fmt.Errorf("source is empty")
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			p.Architectures = list.Architectures(true)
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	generatedFiles := map[string]utils.ChecksumInfo{}
	renameMap := map[string]string{}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	// For all architectures, generate Packages/Sources files
	for _, arch := range p.Architectures {
		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		var relativePath string
		if arch == "source" {
			relativePath = filepath.Join(p.Component, "source", "Sources")
		} else {
			relativePath = filepath.Join(p.Component, fmt.Sprintf("binary-%s", arch), "Packages")
		}

		err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
		if err != nil {
			return err
		}

		var packagesFile *os.File
		packagesFile, err = publishedStorage.CreateFile(filepath.Join(basePath, relativePath+suffix))
		if err != nil {
			return fmt.Errorf("unable to create Packages file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
		}

		bufWriter := bufio.NewWriter(packagesFile)

		err = list.ForEach(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}
			if pkg.MatchesArchitecture(arch) {
				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, p.Component)
				if err != nil {
					return err
				}

				err = pkg.Stanza().WriteTo(bufWriter)
				if err != nil {
					return err
				}
				err = bufWriter.WriteByte('\n')
				if err != nil {
					return err
				}

				pkg.files = nil
				pkg.deps = nil
				pkg.extra = nil
			}

			return nil
		})
		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		err = bufWriter.Flush()
		if err != nil {
			return fmt.Errorf("unable to write Packages file: %s", err)
		}

		err = utils.CompressFile(packagesFile)
		if err != nil {
			return fmt.Errorf("unable to compress Packages files: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, relativePath+suffix+".gz")] = filepath.Join(basePath, relativePath+".gz")
			renameMap[filepath.Join(basePath, relativePath+suffix+".bz2")] = filepath.Join(basePath, relativePath+".bz2")
		}

		packagesFile.Close()

		var checksumInfo utils.ChecksumInfo
		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".gz"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".gz"] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".bz2"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".bz2"] = checksumInfo

		if progress != nil {
			progress.ShutdownBar()
		}
	}

	release := make(Stanza)
	if p.Origin == "" {
		release["Origin"] = p.Prefix + " " + p.Distribution
	} else {
		release["Origin"] = p.Origin
	}
	if p.Label == "" {
		release["Label"] = p.Prefix + " " + p.Distribution
	} else {
		release["Label"] = p.Label
	}
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Components"] = p.Component
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"

	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	releaseFile, err := publishedStorage.CreateFile(filepath.Join(basePath, "Release"+suffix))
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	if suffix != "" {
		renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
	}

	bufWriter := bufio.NewWriter(releaseFile)

	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFilename := releaseFile.Name()
	releaseFile.Close()

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
			renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
		}
	}

	for oldName, newName := range renameMap {
		err = publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}
// aptly db cleanup
func aptlyDbCleanup(cmd *commander.Command, args []string) error {
	var err error

	if len(args) != 0 {
		cmd.Usage()
		return err
	}

	// collect information about referenced packages...
	existingPackageRefs := deb.NewPackageRefList()

	context.Progress().Printf("Loading mirrors, local repos and snapshots...\n")
	err = context.CollectionFactory().RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
		err := context.CollectionFactory().RemoteRepoCollection().LoadComplete(repo)
		if err != nil {
			return err
		}
		if repo.RefList() != nil {
			existingPackageRefs = existingPackageRefs.Merge(repo.RefList(), false)
		}
		return nil
	})
	if err != nil {
		return err
	}

	err = context.CollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
		err := context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
		if err != nil {
			return err
		}
		if repo.RefList() != nil {
			existingPackageRefs = existingPackageRefs.Merge(repo.RefList(), false)
		}
		return nil
	})
	if err != nil {
		return err
	}

	err = context.CollectionFactory().SnapshotCollection().ForEach(func(snapshot *deb.Snapshot) error {
		err := context.CollectionFactory().SnapshotCollection().LoadComplete(snapshot)
		if err != nil {
			return err
		}
		existingPackageRefs = existingPackageRefs.Merge(snapshot.RefList(), false)
		return nil
	})
	if err != nil {
		return err
	}

	// ... and compare it to the list of all packages
	context.Progress().Printf("Loading list of all packages...\n")
	allPackageRefs := context.CollectionFactory().PackageCollection().AllPackageRefs()

	toDelete := allPackageRefs.Substract(existingPackageRefs)

	// delete packages that are no longer referenced
	context.Progress().Printf("Deleting unreferenced packages (%d)...\n", toDelete.Len())

	// database can't err as collection factory already constructed
	db, _ := context.Database()

	db.StartBatch()
	err = toDelete.ForEach(func(ref []byte) error {
		return context.CollectionFactory().PackageCollection().DeleteByKey(ref)
	})
	if err != nil {
		return err
	}

	err = db.FinishBatch()
	if err != nil {
		return fmt.Errorf("unable to write to DB: %s", err)
	}

	// now, build a list of files that should be present in Repository (package pool)
	context.Progress().Printf("Building list of files referenced by packages...\n")
	referencedFiles := make([]string, 0, existingPackageRefs.Len())
	context.Progress().InitBar(int64(existingPackageRefs.Len()), false)

	err = existingPackageRefs.ForEach(func(key []byte) error {
		pkg, err2 := context.CollectionFactory().PackageCollection().ByKey(key)
		if err2 != nil {
			return err2
		}
		paths, err2 := pkg.FilepathList(context.PackagePool())
		if err2 != nil {
			return err2
		}
		referencedFiles = append(referencedFiles, paths...)
		context.Progress().AddBar(1)

		return nil
	})
	if err != nil {
		return err
	}

	sort.Strings(referencedFiles)
	context.Progress().ShutdownBar()

	// build a list of files in the package pool
	context.Progress().Printf("Building list of files in package pool...\n")
	existingFiles, err := context.PackagePool().FilepathList(context.Progress())
	if err != nil {
		return fmt.Errorf("unable to collect file paths: %s", err)
	}

	// find files which are in the pool but not referenced by packages
	filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles)

	// delete files that are no longer referenced
	context.Progress().Printf("Deleting unreferenced files (%d)...\n", len(filesToDelete))

	if len(filesToDelete) > 0 {
		context.Progress().InitBar(int64(len(filesToDelete)), false)

		var size, totalSize int64
		for _, file := range filesToDelete {
			size, err = context.PackagePool().Remove(file)
			if err != nil {
				return err
			}
			context.Progress().AddBar(1)
			totalSize += size
		}
		context.Progress().ShutdownBar()

		context.Progress().Printf("Disk space freed: %s...\n", utils.HumanBytes(totalSize))
	}

	context.Progress().Printf("Compacting database...\n")
	err = db.CompactDB()

	return err
}
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorage aptly.PublishedStorage,
	packageCollection *PackageCollection, signer utils.Signer) error {

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	// Load all packages
	list, err := NewPackageListFromRefList(p.snapshot.RefList(), packageCollection)
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	if list.Len() == 0 {
		return fmt.Errorf("snapshot is empty")
	}

	if len(p.Architectures) == 0 {
		p.Architectures = list.Architectures(true)
	}

	if len(p.Architectures) == 0 {
		return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
	}

	sort.Strings(p.Architectures)

	generatedFiles := map[string]utils.ChecksumInfo{}

	// For all architectures, generate Packages/Sources files
	for _, arch := range p.Architectures {
		var relativePath string
		if arch == "source" {
			relativePath = filepath.Join(p.Component, "source", "Sources")
		} else {
			relativePath = filepath.Join(p.Component, fmt.Sprintf("binary-%s", arch), "Packages")
		}

		err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
		if err != nil {
			return err
		}

		packagesFile, err := publishedStorage.CreateFile(filepath.Join(basePath, relativePath))
		if err != nil {
			return fmt.Errorf("unable to create Packages file: %s", err)
		}

		bufWriter := bufio.NewWriter(packagesFile)

		err = list.ForEach(func(pkg *Package) error {
			if pkg.MatchesArchitecture(arch) {
				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, p.Component)
				if err != nil {
					return err
				}

				err = pkg.Stanza().WriteTo(bufWriter)
				if err != nil {
					return err
				}
				err = bufWriter.WriteByte('\n')
				if err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		err = bufWriter.Flush()
		if err != nil {
			return fmt.Errorf("unable to write Packages file: %s", err)
		}

		err = utils.CompressFile(packagesFile)
		if err != nil {
			return fmt.Errorf("unable to compress Packages files: %s", err)
		}

		packagesFile.Close()

		checksumInfo, err := publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+".gz"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".gz"] = checksumInfo

		checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+".bz2"))
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		generatedFiles[relativePath+".bz2"] = checksumInfo
	}

	release := make(Stanza)
	release["Origin"] = p.Prefix + " " + p.Distribution
	release["Label"] = p.Prefix + " " + p.Distribution
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Components"] = p.Component
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"

	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	releaseFile, err := publishedStorage.CreateFile(filepath.Join(basePath, "Release"))
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	bufWriter := bufio.NewWriter(releaseFile)

	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFilename := releaseFile.Name()
	releaseFile.Close()

	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}
	}

	return nil
}
// aptly db cleanup
func aptlyDbCleanup(cmd *commander.Command, args []string) error {
	var err error

	if len(args) != 0 {
		cmd.Usage()
		return err
	}

	// collect information about referenced packages...
	existingPackageRefs := debian.NewPackageRefList()

	context.downloader.GetProgress().Printf("Loading mirrors and snapshots...\n")
	repoCollection := debian.NewRemoteRepoCollection(context.database)
	err = repoCollection.ForEach(func(repo *debian.RemoteRepo) error {
		err := repoCollection.LoadComplete(repo)
		if err != nil {
			return err
		}
		existingPackageRefs = existingPackageRefs.Merge(repo.RefList(), false)
		return nil
	})
	if err != nil {
		return err
	}

	snapshotCollection := debian.NewSnapshotCollection(context.database)
	err = snapshotCollection.ForEach(func(snapshot *debian.Snapshot) error {
		err := snapshotCollection.LoadComplete(snapshot)
		if err != nil {
			return err
		}
		existingPackageRefs = existingPackageRefs.Merge(snapshot.RefList(), false)
		return nil
	})
	if err != nil {
		return err
	}

	// ... and compare it to the list of all packages
	context.downloader.GetProgress().Printf("Loading list of all packages...\n")
	packageCollection := debian.NewPackageCollection(context.database)
	allPackageRefs := packageCollection.AllPackageRefs()

	toDelete := allPackageRefs.Substract(existingPackageRefs)

	// delete packages that are no longer referenced
	context.downloader.GetProgress().Printf("Deleting unreferenced packages (%d)...\n", toDelete.Len())

	context.database.StartBatch()
	err = toDelete.ForEach(func(ref []byte) error {
		return packageCollection.DeleteByKey(ref)
	})
	if err != nil {
		return err
	}

	err = context.database.FinishBatch()
	if err != nil {
		return fmt.Errorf("unable to write to DB: %s", err)
	}

	// now, build a list of files that should be present in Repository (package pool)
	context.downloader.GetProgress().Printf("Building list of files referenced by packages...\n")
	referencedFiles := make([]string, 0, existingPackageRefs.Len())
	context.downloader.GetProgress().InitBar(int64(existingPackageRefs.Len()), false)

	err = existingPackageRefs.ForEach(func(key []byte) error {
		pkg, err := packageCollection.ByKey(key)
		if err != nil {
			return err
		}
		paths, err := pkg.FilepathList(context.packageRepository)
		if err != nil {
			return err
		}
		referencedFiles = append(referencedFiles, paths...)
		context.downloader.GetProgress().AddBar(1)

		return nil
	})
	if err != nil {
		return err
	}

	sort.Strings(referencedFiles)
	context.downloader.GetProgress().ShutdownBar()

	// build a list of files in the package pool
	context.downloader.GetProgress().Printf("Building list of files in package pool...\n")
	existingFiles, err := context.packageRepository.PoolFilepathList(context.downloader.GetProgress())
	if err != nil {
		return fmt.Errorf("unable to collect file paths: %s", err)
	}

	// find files which are in the pool but not referenced by packages
	filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles)

	// delete files that are no longer referenced
	context.downloader.GetProgress().Printf("Deleting unreferenced files (%d)...\n", len(filesToDelete))

	if len(filesToDelete) > 0 {
		context.downloader.GetProgress().InitBar(int64(len(filesToDelete)), false)

		totalSize := int64(0)
		for _, file := range filesToDelete {
			size, err := context.packageRepository.PoolRemove(file)
			if err != nil {
				return err
			}
			context.downloader.GetProgress().AddBar(1)
			totalSize += size
		}
		context.downloader.GetProgress().ShutdownBar()

		context.downloader.GetProgress().Printf("Disk space freed: %.2f GiB...\n", float64(totalSize)/1024.0/1024.0/1024.0)
	}

	return err
}
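// Aside (illustrative, hypothetical helper): later revisions replace the hard-coded
// "%.2f GiB" arithmetic above with a human-readable formatter (utils.HumanBytes).
// A minimal runnable stand-in with the same intent:

package main

import "fmt"

// humanBytes renders a byte count with binary (1024-based) units.
func humanBytes(size int64) string {
	const unit = 1024
	if size < unit {
		return fmt.Sprintf("%d B", size)
	}
	div, exp := int64(unit), 0
	for n := size / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.2f %ciB", float64(size)/float64(div), "KMGTPE"[exp])
}

func main() {
	fmt.Println(humanBytes(5368709120)) // 5.00 GiB
}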
// aptly db cleanup
func aptlyDbCleanup(cmd *commander.Command, args []string) error {
	var err error

	if len(args) != 0 {
		cmd.Usage()
		return commander.ErrCommandError
	}

	verbose := context.Flags().Lookup("verbose").Value.Get().(bool)
	dryRun := context.Flags().Lookup("dry-run").Value.Get().(bool)

	// collect information about referenced packages...
	existingPackageRefs := deb.NewPackageRefList()

	// used only in verbose mode to report package use source
	packageRefSources := map[string][]string{}

	context.Progress().ColoredPrintf("@{w!}Loading mirrors, local repos, snapshots and published repos...@|")
	if verbose {
		context.Progress().ColoredPrintf("@{y}Loading mirrors:@|")
	}
	err = context.CollectionFactory().RemoteRepoCollection().ForEach(func(repo *deb.RemoteRepo) error {
		if verbose {
			context.Progress().ColoredPrintf("- @{g}%s@|", repo.Name)
		}

		err := context.CollectionFactory().RemoteRepoCollection().LoadComplete(repo)
		if err != nil {
			return err
		}
		if repo.RefList() != nil {
			existingPackageRefs = existingPackageRefs.Merge(repo.RefList(), false, true)

			if verbose {
				description := fmt.Sprintf("mirror %s", repo.Name)
				repo.RefList().ForEach(func(key []byte) error {
					packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
					return nil
				})
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	if verbose {
		context.Progress().ColoredPrintf("@{y}Loading local repos:@|")
	}
	err = context.CollectionFactory().LocalRepoCollection().ForEach(func(repo *deb.LocalRepo) error {
		if verbose {
			context.Progress().ColoredPrintf("- @{g}%s@|", repo.Name)
		}

		err := context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
		if err != nil {
			return err
		}
		if repo.RefList() != nil {
			existingPackageRefs = existingPackageRefs.Merge(repo.RefList(), false, true)

			if verbose {
				description := fmt.Sprintf("local repo %s", repo.Name)
				repo.RefList().ForEach(func(key []byte) error {
					packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
					return nil
				})
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	if verbose {
		context.Progress().ColoredPrintf("@{y}Loading snapshots:@|")
	}
	err = context.CollectionFactory().SnapshotCollection().ForEach(func(snapshot *deb.Snapshot) error {
		if verbose {
			context.Progress().ColoredPrintf("- @{g}%s@|", snapshot.Name)
		}

		err := context.CollectionFactory().SnapshotCollection().LoadComplete(snapshot)
		if err != nil {
			return err
		}
		existingPackageRefs = existingPackageRefs.Merge(snapshot.RefList(), false, true)

		if verbose {
			description := fmt.Sprintf("snapshot %s", snapshot.Name)
			snapshot.RefList().ForEach(func(key []byte) error {
				packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
				return nil
			})
		}
		return nil
	})
	if err != nil {
		return err
	}

	if verbose {
		context.Progress().ColoredPrintf("@{y}Loading published repositories:@|")
	}
	err = context.CollectionFactory().PublishedRepoCollection().ForEach(func(published *deb.PublishedRepo) error {
		if verbose {
			context.Progress().ColoredPrintf("- @{g}%s:%s/%s@|", published.Storage, published.Prefix, published.Distribution)
		}

		if published.SourceKind != "local" {
			return nil
		}

		err := context.CollectionFactory().PublishedRepoCollection().LoadComplete(published, context.CollectionFactory())
		if err != nil {
			return err
		}

		for _, component := range published.Components() {
			existingPackageRefs = existingPackageRefs.Merge(published.RefList(component), false, true)

			if verbose {
				description := fmt.Sprintf("published repository %s:%s/%s component %s",
					published.Storage, published.Prefix, published.Distribution, component)
				published.RefList(component).ForEach(func(key []byte) error {
					packageRefSources[string(key)] = append(packageRefSources[string(key)], description)
					return nil
				})
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// ... and compare it to the list of all packages
	context.Progress().ColoredPrintf("@{w!}Loading list of all packages...@|")
	allPackageRefs := context.CollectionFactory().PackageCollection().AllPackageRefs()

	toDelete := allPackageRefs.Substract(existingPackageRefs)

	// delete packages that are no longer referenced
	context.Progress().ColoredPrintf("@{r!}Deleting unreferenced packages (%d)...@|", toDelete.Len())

	// database can't err as collection factory already constructed
	db, _ := context.Database()

	if toDelete.Len() > 0 {
		if verbose {
			context.Progress().ColoredPrintf("@{r}List of package keys to delete:@|")
			err = toDelete.ForEach(func(ref []byte) error {
				context.Progress().ColoredPrintf(" - @{r}%s@|", string(ref))
				return nil
			})
			if err != nil {
				return err
			}
		}

		if !dryRun {
			db.StartBatch()
			err = toDelete.ForEach(func(ref []byte) error {
				return context.CollectionFactory().PackageCollection().DeleteByKey(ref)
			})
			if err != nil {
				return err
			}

			err = db.FinishBatch()
			if err != nil {
				return fmt.Errorf("unable to write to DB: %s", err)
			}
		} else {
			context.Progress().ColoredPrintf("@{y!}Skipped deletion, as -dry-run has been requested.@|")
		}
	}

	// now, build a list of files that should be present in Repository (package pool)
	context.Progress().ColoredPrintf("@{w!}Building list of files referenced by packages...@|")
	referencedFiles := make([]string, 0, existingPackageRefs.Len())
	context.Progress().InitBar(int64(existingPackageRefs.Len()), false)

	err = existingPackageRefs.ForEach(func(key []byte) error {
		pkg, err2 := context.CollectionFactory().PackageCollection().ByKey(key)
		if err2 != nil {
			tail := ""
			if verbose {
				tail = fmt.Sprintf(" (sources: %s)", strings.Join(packageRefSources[string(key)], ", "))
			}
			if dryRun {
				context.Progress().ColoredPrintf("@{r!}Unresolvable package reference, skipping (-dry-run): %s: %s%s",
					string(key), err2, tail)
				return nil
			}
			return fmt.Errorf("unable to load package %s: %s%s", string(key), err2, tail)
		}
		paths, err2 := pkg.FilepathList(context.PackagePool())
		if err2 != nil {
			return err2
		}
		referencedFiles = append(referencedFiles, paths...)
		context.Progress().AddBar(1)

		return nil
	})
	if err != nil {
		return err
	}

	sort.Strings(referencedFiles)
	context.Progress().ShutdownBar()

	// build a list of files in the package pool
	context.Progress().ColoredPrintf("@{w!}Building list of files in package pool...@|")
	existingFiles, err := context.PackagePool().FilepathList(context.Progress())
	if err != nil {
		return fmt.Errorf("unable to collect file paths: %s", err)
	}

	// find files which are in the pool but not referenced by packages
	filesToDelete := utils.StrSlicesSubstract(existingFiles, referencedFiles)

	// delete files that are no longer referenced
	context.Progress().ColoredPrintf("@{r!}Deleting unreferenced files (%d)...@|", len(filesToDelete))

	if len(filesToDelete) > 0 {
		if verbose {
			context.Progress().ColoredPrintf("@{r}List of files to be deleted:@|")
			for _, file := range filesToDelete {
				context.Progress().ColoredPrintf(" - @{r}%s@|", file)
			}
		}

		if !dryRun {
			context.Progress().InitBar(int64(len(filesToDelete)), false)

			var size, totalSize int64
			for _, file := range filesToDelete {
				size, err = context.PackagePool().Remove(file)
				if err != nil {
					return err
				}
				context.Progress().AddBar(1)
				totalSize += size
			}
			context.Progress().ShutdownBar()

			context.Progress().ColoredPrintf("@{w!}Disk space freed: %s...@|", utils.HumanBytes(totalSize))
		} else {
			context.Progress().ColoredPrintf("@{y!}Skipped file deletion, as -dry-run has been requested.@|")
		}
	}

	if !dryRun {
		context.Progress().ColoredPrintf("@{w!}Compacting database...@|")
		err = db.CompactDB()
	} else {
		context.Progress().ColoredPrintf("@{y!}Skipped DB compaction, as -dry-run has been requested.@|")
	}

	return err
}