// RemoveFiles removes files that were created by Publish // // It can remove prefix fully, and part of pool (for specific component) func (p *PublishedRepo) RemoveFiles(publishedStorageProvider aptly.PublishedStorageProvider, removePrefix bool, removePoolComponents []string, progress aptly.Progress) error { publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage) // I. Easy: remove whole prefix (meta+packages) if removePrefix { err := publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "dists"), progress) if err != nil { return err } return publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "pool"), progress) } // II. Medium: remove metadata, it can't be shared as prefix/distribution as unique err := publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "dists", p.Distribution), progress) if err != nil { return err } // III. Complex: there are no other publishes with the same prefix + component for _, component := range removePoolComponents { err = publishedStorage.RemoveDirs(filepath.Join(p.Prefix, "pool", component), progress) if err != nil { return err } } return nil }
// Remove removes published repository, cleaning up directories, files func (collection *PublishedRepoCollection) Remove(publishedStorageProvider aptly.PublishedStorageProvider, storage, prefix, distribution string, collectionFactory *CollectionFactory, progress aptly.Progress) error { repo, err := collection.ByStoragePrefixDistribution(storage, prefix, distribution) if err != nil { return err } removePrefix := true removePoolComponents := repo.Components() cleanComponents := []string{} repoPosition := -1 for i, r := range collection.list { if r == repo { repoPosition = i continue } if r.Storage == repo.Storage && r.Prefix == repo.Prefix { removePrefix = false rComponents := r.Components() for _, component := range rComponents { if utils.StrSliceHasItem(removePoolComponents, component) { removePoolComponents = utils.StrSlicesSubstract(removePoolComponents, []string{component}) cleanComponents = append(cleanComponents, component) } } } } err = repo.RemoveFiles(publishedStorageProvider, removePrefix, removePoolComponents, progress) if err != nil { return err } collection.list[len(collection.list)-1], collection.list[repoPosition], collection.list = nil, collection.list[len(collection.list)-1], collection.list[:len(collection.list)-1] if len(cleanComponents) > 0 { err = collection.CleanupPrefixComponentFiles(repo.Prefix, cleanComponents, publishedStorageProvider.GetPublishedStorage(storage), collectionFactory, progress) if err != nil { return err } } err = collection.db.Delete(repo.Key()) if err != nil { return err } for _, component := range repo.Components() { err = collection.db.Delete(repo.RefKey(component)) if err != nil { return err } } return nil }
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {
	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	// Ensure the target layout exists: <prefix>/pool and <prefix>/dists/<distribution>.
	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	// One package list per component, resolved from the stored ref lists.
	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	// On first publish (not re-publishing) derive the architecture list from the
	// packages themselves when the user didn't supply one explicitly.
	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	// When re-publishing over a live repo, write every index with a ".tmp" suffix
	// first and atomically rename at the end (see indexes.RenameFiles below).
	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	// Scratch directory for index files; removed on any exit path.
	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)

	for component, list := range lists {
		hadUdebs := false

		// For all architectures, pregenerate packages/sources files
		for _, arch := range p.Architectures {
			indexes.PackageIndex(component, arch, false)
		}

		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		list.PrepareIndex()

		// Contents indexes keyed by "<arch>-<isUdeb>", created lazily.
		contentIndexes := map[string]*ContentsIndex{}

		err = list.ForEachIndexed(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}

			// Link the package file into the pool once if it matches any
			// published architecture.
			matches := false
			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					matches = true
					break
				}
			}

			if matches {
				hadUdebs = hadUdebs || pkg.IsUdeb
				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
				if err != nil {
					return err
				}
			}

			// Emit the package stanza into each matching per-arch index, and
			// record its file list in the Contents index unless disabled.
			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					var bufWriter *bufio.Writer

					if !p.SkipContents {
						key := fmt.Sprintf("%s-%v", arch, pkg.IsUdeb)

						contentIndex := contentIndexes[key]

						if contentIndex == nil {
							contentIndex = NewContentsIndex()
							contentIndexes[key] = contentIndex
						}

						contentIndex.Push(pkg, packagePool)
					}

					bufWriter, err = indexes.PackageIndex(component, arch, pkg.IsUdeb).BufWriter()
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter, pkg.IsSource, false)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}
				}
			}

			// Release per-package memory as soon as the stanza is written.
			pkg.files = nil
			pkg.deps = nil
			pkg.extra = nil
			pkg.contents = nil

			return nil
		})

		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		// Write out non-empty Contents indexes for every arch/udeb combination.
		for _, arch := range p.Architectures {
			for _, udeb := range []bool{true, false} {
				index := contentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
				if index == nil || index.Empty() {
					continue
				}

				bufWriter, err := indexes.ContentsIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}

				_, err = index.WriteTo(bufWriter)
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}
			}
		}

		if progress != nil {
			progress.ShutdownBar()
		}

		udebs := []bool{false}
		if hadUdebs {
			udebs = append(udebs, true)

			// For all architectures, pregenerate .udeb indexes
			for _, arch := range p.Architectures {
				indexes.PackageIndex(component, arch, true)
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			for _, udeb := range udebs {
				release := make(Stanza)
				release["Archive"] = p.Distribution
				release["Architecture"] = arch
				release["Component"] = component
				release["Origin"] = p.GetOrigin()
				release["Label"] = p.GetLabel()

				var bufWriter *bufio.Writer
				bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to get ReleaseIndex writer: %s", err)
				}

				err = release.WriteTo(bufWriter, false, true)
				if err != nil {
					return fmt.Errorf("unable to create Release file: %s", err)
				}
			}
		}
	}

	if progress != nil {
		progress.Printf("Finalizing metadata files...\n")
	}
	// Flush/upload all per-component indexes; this also populates
	// indexes.generatedFiles with their checksums for the top-level Release.
	err = indexes.FinalizeAll(progress)
	if err != nil {
		return err
	}

	// Build the top-level Release stanza with per-file checksum lists.
	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Suite"] = p.Distribution
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = ""
	release["SHA1"] = ""
	release["SHA256"] = ""
	release["SHA512"] = ""

	release["Components"] = strings.Join(p.Components(), " ")

	// NOTE: map iteration order is random, so checksum line order varies between
	// runs; presumably consumers don't depend on it — confirm if reproducible
	// output is ever required.
	for path, info := range indexes.generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
		release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, info.Size, path)
	}

	releaseFile := indexes.ReleaseFile()
	bufWriter, err := releaseFile.BufWriter()
	if err != nil {
		return err
	}

	err = release.WriteTo(bufWriter, false, true)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	err = releaseFile.Finalize(signer)
	if err != nil {
		return err
	}

	// Atomically swap ".tmp" files into place when re-publishing.
	err = indexes.RenameFiles()
	if err != nil {
		return err
	}

	return nil
}
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {
	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	// Ensure the target layout exists: <prefix>/pool and <prefix>/dists/<distribution>.
	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	// One package list per component, resolved from the stored ref lists.
	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	// On first publish (not re-publishing) derive the architecture list from the
	// packages themselves when the user didn't supply one explicitly.
	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	// When re-publishing over a live repo, write every file with a ".tmp" suffix
	// first, collecting old->new names in renameMap, and rename at the very end.
	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	generatedFiles := map[string]utils.ChecksumInfo{} // relative path -> checksums, for top-level Release
	renameMap := map[string]string{}                  // suffixed name -> final name

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	// Scratch directory for index files; removed on any exit path.
	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	for component, list := range lists {
		var relativePath string

		// For all architectures, generate packages/sources files
		for _, arch := range p.Architectures {
			if progress != nil {
				progress.InitBar(int64(list.Len()), false)
			}

			// Sources for the pseudo-arch "source", binary-<arch>/Packages otherwise.
			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Sources")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
			}
			err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
			if err != nil {
				return err
			}

			var packagesFile *os.File

			packagesFileName := filepath.Join(tempDir, fmt.Sprintf("pkgs_%s_%s", component, arch))
			packagesFile, err = os.Create(packagesFileName)
			if err != nil {
				return fmt.Errorf("unable to create temporary Packages file: %s", err)
			}

			bufWriter := bufio.NewWriter(packagesFile)

			// Link each matching package into the pool and append its stanza.
			err = list.ForEach(func(pkg *Package) error {
				if progress != nil {
					progress.AddBar(1)
				}
				if pkg.MatchesArchitecture(arch) {
					err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}

					// Release per-package memory as soon as the stanza is written.
					pkg.files = nil
					pkg.deps = nil
					pkg.extra = nil
				}

				return nil
			})

			if err != nil {
				return fmt.Errorf("unable to process packages: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to write Packages file: %s", err)
			}

			// Produces .gz/.bz2 siblings next to the plain file.
			// NOTE(review): on CompressFile error packagesFile is returned
			// without being closed — the descriptor leaks until process exit.
			err = utils.CompressFile(packagesFile)
			if err != nil {
				return fmt.Errorf("unable to compress Packages files: %s", err)
			}

			packagesFile.Close()

			// Publish plain + compressed variants, remembering checksums and
			// (when re-publishing) the rename needed to drop the suffix.
			for _, ext := range []string{"", ".gz", ".bz2"} {
				var checksumInfo utils.ChecksumInfo

				checksumInfo, err = utils.ChecksumsForFile(packagesFileName + ext)
				if err != nil {
					return fmt.Errorf("unable to collect checksums: %s", err)
				}
				generatedFiles[relativePath+ext] = checksumInfo

				err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix+ext), packagesFileName+ext)
				if err != nil {
					return fmt.Errorf("unable to publish file: %s", err)
				}

				if suffix != "" {
					renameMap[filepath.Join(basePath, relativePath+suffix+ext)] = filepath.Join(basePath, relativePath+ext)
				}
			}

			if progress != nil {
				progress.ShutdownBar()
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			release := make(Stanza)
			release["Archive"] = p.Distribution
			release["Architecture"] = arch
			release["Component"] = component
			release["Origin"] = p.GetOrigin()
			release["Label"] = p.GetLabel()

			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Release")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
			}

			var file *os.File

			fileName := filepath.Join(tempDir, fmt.Sprintf("release_%s_%s", component, arch))
			file, err = os.Create(fileName)
			if err != nil {
				return fmt.Errorf("unable to create temporary Release file: %s", err)
			}

			bufWriter := bufio.NewWriter(file)

			err = release.WriteTo(bufWriter)
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			file.Close()

			var checksumInfo utils.ChecksumInfo
			checksumInfo, err = utils.ChecksumsForFile(fileName)
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath] = checksumInfo

			err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix), fileName)
			if err != nil {
				// NOTE(review): file was already closed above, so this second
				// Close is redundant (harmless double close).
				file.Close()
				return fmt.Errorf("unable to publish file: %s", err)
			}

			if suffix != "" {
				renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
			}
		}
	}

	// Build the top-level Release stanza with per-file checksum lists.
	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"

	release["Components"] = strings.Join(p.Components(), " ")

	// NOTE: map iteration order is random, so checksum line order varies between runs.
	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	var releaseFile *os.File
	releaseFilename := filepath.Join(tempDir, "Release")
	releaseFile, err = os.Create(releaseFilename)
	if err != nil {
		return fmt.Errorf("unable to create temporary Release file: %s", err)
	}

	bufWriter := bufio.NewWriter(releaseFile)

	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFile.Close()

	if suffix != "" {
		renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
	}

	err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix), releaseFilename)
	if err != nil {
		return fmt.Errorf("unable to publish file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	// Optionally produce detached (Release.gpg) and clearsigned (InRelease) signatures.
	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
			renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
		}

		err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix+".gpg"), releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		err = publishedStorage.PutFile(filepath.Join(basePath, "InRelease"+suffix), filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}
	}

	// Swap ".tmp" files into place when re-publishing (no-op map otherwise).
	for oldName, newName := range renameMap {
		err = publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}