func (a *Api) RepoAddFile(repoName, fileName string, removeAfter, forceReplace bool) (*aptly.RecordingResultReporter, []string, error) {
	verifier := &utils.GpgVerifier{}

	collection := a.Ctx().CollectionFactory().LocalRepoCollection()
	collection.Lock()
	defer collection.Unlock()

	repo, err := collection.ByName(repoName)
	if err != nil {
		return nil, []string{}, err
	}

	err = collection.LoadComplete(repo)
	if err != nil {
		return nil, []string{}, err
	}

	var reporter = &aptly.RecordingResultReporter{
		Warnings:     []string{},
		AddedLines:   []string{},
		RemovedLines: []string{},
	}

	var packageFiles, failedFiles []string

	packageFiles, failedFiles = deb.CollectPackageFiles([]string{fileName}, reporter)

	list, err := deb.NewPackageListFromRefList(repo.RefList(), a.Ctx().CollectionFactory().PackageCollection(), nil)
	if err != nil {
		return nil, []string{}, err
	}

	var processedFiles, failedFiles2 []string

	processedFiles, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, a.Ctx().PackagePool(),
		a.Ctx().CollectionFactory().PackageCollection(), reporter, nil)
	failedFiles = append(failedFiles, failedFiles2...)
	if err != nil {
		return nil, []string{}, err
	}

	repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

	err = collection.Update(repo)
	if err != nil {
		return nil, []string{}, err
	}

	processedFiles = utils.StrSliceDeduplicate(processedFiles)

	if removeAfter {
		for _, file := range processedFiles {
			os.Remove(file)
		}
	}

	if failedFiles == nil {
		failedFiles = []string{}
	}

	return reporter, failedFiles, nil
}
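// The snippet above relies on utils.StrSliceDeduplicate to collapse duplicate
// entries in processedFiles before removal. A minimal sketch of such a helper
// (an assumption about its behavior, not aptly's actual implementation): an
// order-preserving, map-based dedup. No imports required.
func strSliceDeduplicateSketch(s []string) []string {
	seen := make(map[string]struct{}, len(s))
	result := make([]string, 0, len(s))

	for _, x := range s {
		if _, ok := seen[x]; ok {
			continue // already emitted, drop the duplicate
		}
		seen[x] = struct{}{}
		result = append(result, x)
	}

	return result
}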
// POST /repos/:name/file/:dir
func apiReposPackageFromDir(c *gin.Context) {
	forceReplace := c.Request.URL.Query().Get("forceReplace") == "1"
	noRemove := c.Request.URL.Query().Get("noRemove") == "1"

	if !verifyDir(c) {
		return
	}

	fileParam := c.Params.ByName("file")
	if fileParam != "" && !verifyPath(fileParam) {
		c.Fail(400, fmt.Errorf("wrong file"))
		return
	}

	collection := context.CollectionFactory().LocalRepoCollection()
	collection.Lock()
	defer collection.Unlock()

	repo, err := collection.ByName(c.Params.ByName("name"))
	if err != nil {
		c.Fail(404, err)
		return
	}

	err = collection.LoadComplete(repo)
	if err != nil {
		c.Fail(500, err)
		return
	}

	verifier := &utils.GpgVerifier{}

	var (
		sources                      []string
		packageFiles, failedFiles    []string
		processedFiles, failedFiles2 []string
		reporter                     = &aptly.RecordingResultReporter{
			Warnings:     []string{},
			AddedLines:   []string{},
			RemovedLines: []string{},
		}
		list *deb.PackageList
	)

	if fileParam == "" {
		sources = []string{filepath.Join(context.UploadPath(), c.Params.ByName("dir"))}
	} else {
		sources = []string{filepath.Join(context.UploadPath(), c.Params.ByName("dir"), c.Params.ByName("file"))}
	}

	packageFiles, failedFiles = deb.CollectPackageFiles(sources, reporter)

	list, err = deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), nil)
	if err != nil {
		c.Fail(500, fmt.Errorf("unable to load packages: %s", err))
		return
	}

	processedFiles, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
		context.CollectionFactory().PackageCollection(), reporter, nil)
	failedFiles = append(failedFiles, failedFiles2...)
	if err != nil {
		c.Fail(500, fmt.Errorf("unable to import package files: %s", err))
		return
	}

	repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

	err = context.CollectionFactory().LocalRepoCollection().Update(repo)
	if err != nil {
		c.Fail(500, fmt.Errorf("unable to save: %s", err))
		return
	}

	if !noRemove {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				reporter.Warning("unable to remove file %s: %s", file, err)
			}
		}

		// attempt to remove dir; if it fails, that's fine: probably it's not empty
		os.Remove(filepath.Join(context.UploadPath(), c.Params.ByName("dir")))
	}

	if failedFiles == nil {
		failedFiles = []string{}
	}

	c.JSON(200, gin.H{
		"Report":      reporter,
		"FailedFiles": failedFiles,
	})
}
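// A minimal client for the endpoint above, for illustration only; the host,
// port, repo and directory names are hypothetical, and the route is assumed to
// be mounted under the /api prefix used by aptly's API server. forceReplace=1
// and noRemove=1 correspond to the query flags parsed at the top of
// apiReposPackageFromDir.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	url := "http://localhost:8080/api/repos/my-repo/file/upload-dir?forceReplace=1&noRemove=1"

	resp, err := http.Post(url, "application/json", nil) // no request body is needed
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// The handler responds with a JSON object holding "Report" and "FailedFiles".
	fmt.Println(resp.StatusCode, string(body))
}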
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {
	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	indexes := newIndexFiles(publishedStorage, basePath, tempDir, suffix)

	for component, list := range lists {
		hadUdebs := false

		// For all architectures, pregenerate packages/sources files
		for _, arch := range p.Architectures {
			indexes.PackageIndex(component, arch, false)
		}

		if progress != nil {
			progress.InitBar(int64(list.Len()), false)
		}

		list.PrepareIndex()

		contentIndexes := map[string]*ContentsIndex{}

		err = list.ForEachIndexed(func(pkg *Package) error {
			if progress != nil {
				progress.AddBar(1)
			}

			matches := false
			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					matches = true
					break
				}
			}

			if matches {
				hadUdebs = hadUdebs || pkg.IsUdeb

				err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
				if err != nil {
					return err
				}
			}

			for _, arch := range p.Architectures {
				if pkg.MatchesArchitecture(arch) {
					var bufWriter *bufio.Writer

					if !p.SkipContents {
						key := fmt.Sprintf("%s-%v", arch, pkg.IsUdeb)
						contentIndex := contentIndexes[key]

						if contentIndex == nil {
							contentIndex = NewContentsIndex()
							contentIndexes[key] = contentIndex
						}

						contentIndex.Push(pkg, packagePool)
					}

					bufWriter, err = indexes.PackageIndex(component, arch, pkg.IsUdeb).BufWriter()
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter, pkg.IsSource, false)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}
				}
			}

			pkg.files = nil
			pkg.deps = nil
			pkg.extra = nil
			pkg.contents = nil

			return nil
		})

		if err != nil {
			return fmt.Errorf("unable to process packages: %s", err)
		}

		for _, arch := range p.Architectures {
			for _, udeb := range []bool{true, false} {
				index := contentIndexes[fmt.Sprintf("%s-%v", arch, udeb)]
				if index == nil || index.Empty() {
					continue
				}

				bufWriter, err := indexes.ContentsIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}

				_, err = index.WriteTo(bufWriter)
				if err != nil {
					return fmt.Errorf("unable to generate contents index: %v", err)
				}
			}
		}

		if progress != nil {
			progress.ShutdownBar()
		}

		udebs := []bool{false}
		if hadUdebs {
			udebs = append(udebs, true)

			// For all architectures, pregenerate .udeb indexes
			for _, arch := range p.Architectures {
				indexes.PackageIndex(component, arch, true)
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			for _, udeb := range udebs {
				release := make(Stanza)
				release["Archive"] = p.Distribution
				release["Architecture"] = arch
				release["Component"] = component
				release["Origin"] = p.GetOrigin()
				release["Label"] = p.GetLabel()

				var bufWriter *bufio.Writer
				bufWriter, err = indexes.ReleaseIndex(component, arch, udeb).BufWriter()
				if err != nil {
					return fmt.Errorf("unable to get ReleaseIndex writer: %s", err)
				}

				err = release.WriteTo(bufWriter, false, true)
				if err != nil {
					return fmt.Errorf("unable to create Release file: %s", err)
				}
			}
		}
	}

	if progress != nil {
		progress.Printf("Finalizing metadata files...\n")
	}

	err = indexes.FinalizeAll(progress)
	if err != nil {
		return err
	}

	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Suite"] = p.Distribution
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = ""
	release["SHA1"] = ""
	release["SHA256"] = ""
	release["SHA512"] = ""
	release["Components"] = strings.Join(p.Components(), " ")

	for path, info := range indexes.generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
		release["SHA512"] += fmt.Sprintf(" %s %8d %s\n", info.SHA512, info.Size, path)
	}

	releaseFile := indexes.ReleaseFile()
	bufWriter, err := releaseFile.BufWriter()
	if err != nil {
		return err
	}

	err = release.WriteTo(bufWriter, false, true)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	err = releaseFile.Finalize(signer)
	if err != nil {
		return err
	}

	err = indexes.RenameFiles()
	if err != nil {
		return err
	}

	return nil
}
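// The ".tmp" suffix and the final indexes.RenameFiles() call above implement a
// write-then-rename update, so a re-publish never exposes half-written index
// files. A minimal standalone sketch of the same pattern using plain os calls
// (file names are hypothetical; the real code renames through publishedStorage):
package main

import (
	"io/ioutil"
	"os"
)

func main() {
	// Write the new index under a temporary name first...
	if err := ioutil.WriteFile("Packages.tmp", []byte("Package: example\n"), 0644); err != nil {
		panic(err)
	}

	// ...then swap it into place with a single rename once it is complete.
	if err := os.Rename("Packages.tmp", "Packages"); err != nil {
		panic(err)
	}
}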
// ExpandGroups expands list of keys/groups into list of keys
func (u *Uploaders) ExpandGroups(items []string) []string {
	result := u.expandGroupsInternal(items, []string{})

	return utils.StrSliceDeduplicate(result)
}
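// expandGroupsInternal is not shown here. A plausible sketch of what it does
// (an assumption inferred from the signature above, not the real code):
// recursively replace group names with their member keys, carrying the trail
// of groups already being expanded to break reference cycles. No imports
// required.
func expandGroupsSketch(groups map[string][]string, items, trail []string) []string {
	result := []string{}

	for _, item := range items {
		members, isGroup := groups[item]
		if !isGroup {
			result = append(result, item) // plain key, keep as-is
			continue
		}

		cycle := false
		for _, t := range trail {
			if t == item {
				cycle = true // this group is already being expanded above us
				break
			}
		}
		if cycle {
			continue
		}

		result = append(result, expandGroupsSketch(groups, members, append(trail, item))...)
	}

	return result
}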
func aptlyRepoInclude(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 1 {
		cmd.Usage()
		return commander.ErrCommandError
	}

	verifier, err := getVerifier(context.Flags())
	if err != nil {
		return fmt.Errorf("unable to initialize GPG verifier: %s", err)
	}

	if verifier == nil {
		verifier = &utils.GpgVerifier{}
	}

	forceReplace := context.Flags().Lookup("force-replace").Value.Get().(bool)
	acceptUnsigned := context.Flags().Lookup("accept-unsigned").Value.Get().(bool)
	ignoreSignatures := context.Flags().Lookup("ignore-signatures").Value.Get().(bool)
	noRemoveFiles := context.Flags().Lookup("no-remove-files").Value.Get().(bool)

	repoTemplate, err := template.New("repo").Parse(context.Flags().Lookup("repo").Value.Get().(string))
	if err != nil {
		return fmt.Errorf("error parsing -repo template: %s", err)
	}

	uploaders := (*deb.Uploaders)(nil)
	uploadersFile := context.Flags().Lookup("uploaders-file").Value.Get().(string)
	if uploadersFile != "" {
		uploaders, err = deb.NewUploadersFromFile(uploadersFile)
		if err != nil {
			return err
		}

		for i := range uploaders.Rules {
			uploaders.Rules[i].CompiledCondition, err = query.Parse(uploaders.Rules[i].Condition)
			if err != nil {
				return fmt.Errorf("error parsing query %s: %s", uploaders.Rules[i].Condition, err)
			}
		}
	}

	reporter := &aptly.ConsoleResultReporter{Progress: context.Progress()}

	var changesFiles, failedFiles, processedFiles []string

	changesFiles, failedFiles = deb.CollectChangesFiles(args, reporter)

	for _, path := range changesFiles {
		var changes *deb.Changes

		changes, err = deb.NewChanges(path)
		if err != nil {
			failedFiles = append(failedFiles, path)
			reporter.Warning("unable to process file %s: %s", path, err)
			continue
		}

		err = changes.VerifyAndParse(acceptUnsigned, ignoreSignatures, verifier)
		if err != nil {
			failedFiles = append(failedFiles, path)
			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
			changes.Cleanup()
			continue
		}

		err = changes.Prepare()
		if err != nil {
			failedFiles = append(failedFiles, path)
			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
			changes.Cleanup()
			continue
		}

		repoName := &bytes.Buffer{}
		err = repoTemplate.Execute(repoName, changes.Stanza)
		if err != nil {
			return fmt.Errorf("error applying template to repo: %s", err)
		}

		context.Progress().Printf("Loading repository %s for changes file %s...\n", repoName.String(), changes.ChangesName)

		repo, err := context.CollectionFactory().LocalRepoCollection().ByName(repoName.String())
		if err != nil {
			failedFiles = append(failedFiles, path)
			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
			changes.Cleanup()
			continue
		}

		currentUploaders := uploaders
		if repo.Uploaders != nil {
			currentUploaders = repo.Uploaders
			for i := range currentUploaders.Rules {
				currentUploaders.Rules[i].CompiledCondition, err = query.Parse(currentUploaders.Rules[i].Condition)
				if err != nil {
					return fmt.Errorf("error parsing query %s: %s", currentUploaders.Rules[i].Condition, err)
				}
			}
		}

		if currentUploaders != nil {
			if err = currentUploaders.IsAllowed(changes); err != nil {
				failedFiles = append(failedFiles, path)
				reporter.Warning("changes file skipped due to uploaders config: %s, keys %#v: %s",
					changes.ChangesName, changes.SignatureKeys, err)
				changes.Cleanup()
				continue
			}
		}

		err = context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
		if err != nil {
			return fmt.Errorf("unable to load repo: %s", err)
		}

		list, err := deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), context.Progress())
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}

		packageFiles, _ := deb.CollectPackageFiles([]string{changes.TempDir}, reporter)

		var restriction deb.PackageQuery

		restriction, err = changes.PackageQuery()
		if err != nil {
			failedFiles = append(failedFiles, path)
			reporter.Warning("unable to process file %s: %s", changes.ChangesName, err)
			changes.Cleanup()
			continue
		}

		var processedFiles2, failedFiles2 []string

		processedFiles2, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
			context.CollectionFactory().PackageCollection(), reporter, restriction)
		if err != nil {
			return fmt.Errorf("unable to import package files: %s", err)
		}

		repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

		err = context.CollectionFactory().LocalRepoCollection().Update(repo)
		if err != nil {
			return fmt.Errorf("unable to save: %s", err)
		}

		err = changes.Cleanup()
		if err != nil {
			return err
		}

		for _, file := range failedFiles2 {
			failedFiles = append(failedFiles, filepath.Join(changes.BasePath, filepath.Base(file)))
		}

		for _, file := range processedFiles2 {
			processedFiles = append(processedFiles, filepath.Join(changes.BasePath, filepath.Base(file)))
		}

		processedFiles = append(processedFiles, path)
	}

	if !noRemoveFiles {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	if len(failedFiles) > 0 {
		context.Progress().ColoredPrintf("@y[!]@| @!Some files were skipped due to errors:@|")
		for _, file := range failedFiles {
			context.Progress().ColoredPrintf(" %s", file)
		}

		return fmt.Errorf("some files failed to be added")
	}

	return err
}
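// The -repo flag above is parsed as a text/template and executed against
// changes.Stanza, so the target repository name can be derived from fields of
// the .changes file. A standalone illustration (the stanza values and template
// string are hypothetical):
package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// A .changes stanza is a map of control-file fields; deb.NewChanges parses
	// the real thing.
	stanza := map[string]string{"Distribution": "unstable", "Source": "hello"}

	repoTemplate := template.Must(template.New("repo").Parse("incoming-{{.Distribution}}"))

	repoName := &bytes.Buffer{}
	if err := repoTemplate.Execute(repoName, stanza); err != nil {
		panic(err)
	}

	fmt.Println(repoName.String()) // incoming-unstable
}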
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorage aptly.PublishedStorage,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress) error {
	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	generatedFiles := map[string]utils.ChecksumInfo{}
	renameMap := map[string]string{}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	for component, list := range lists {
		var relativePath string

		// For all architectures, generate packages/sources files
		for _, arch := range p.Architectures {
			if progress != nil {
				progress.InitBar(int64(list.Len()), false)
			}

			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Sources")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
			}
			err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
			if err != nil {
				return err
			}

			var packagesFile *os.File
			packagesFile, err = publishedStorage.CreateFile(filepath.Join(basePath, relativePath+suffix))
			if err != nil {
				return fmt.Errorf("unable to create Packages file: %s", err)
			}

			if suffix != "" {
				renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
			}

			bufWriter := bufio.NewWriter(packagesFile)

			err = list.ForEach(func(pkg *Package) error {
				if progress != nil {
					progress.AddBar(1)
				}
				if pkg.MatchesArchitecture(arch) {
					err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component)
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}

					pkg.files = nil
					pkg.deps = nil
					pkg.extra = nil
				}

				return nil
			})

			if err != nil {
				return fmt.Errorf("unable to process packages: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to write Packages file: %s", err)
			}

			err = utils.CompressFile(packagesFile)
			if err != nil {
				return fmt.Errorf("unable to compress Packages files: %s", err)
			}

			if suffix != "" {
				renameMap[filepath.Join(basePath, relativePath+suffix+".gz")] = filepath.Join(basePath, relativePath+".gz")
				renameMap[filepath.Join(basePath, relativePath+suffix+".bz2")] = filepath.Join(basePath, relativePath+".bz2")
			}

			packagesFile.Close()

			var checksumInfo utils.ChecksumInfo
			checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix))
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath] = checksumInfo

			checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".gz"))
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath+".gz"] = checksumInfo

			checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix+".bz2"))
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath+".bz2"] = checksumInfo

			if progress != nil {
				progress.ShutdownBar()
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			release := make(Stanza)
			release["Archive"] = p.Distribution
			release["Architecture"] = arch
			release["Component"] = component
			release["Origin"] = p.GetOrigin()
			release["Label"] = p.GetLabel()

			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Release")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
			}

			var file *os.File
			file, err = publishedStorage.CreateFile(filepath.Join(basePath, relativePath+suffix))
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			if suffix != "" {
				renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
			}

			bufWriter := bufio.NewWriter(file)

			err = release.WriteTo(bufWriter)
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			file.Close()

			var checksumInfo utils.ChecksumInfo
			checksumInfo, err = publishedStorage.ChecksumsForFile(filepath.Join(basePath, relativePath+suffix))
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath] = checksumInfo
		}
	}

	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"
	release["Components"] = strings.Join(p.Components(), " ")

	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	releaseFile, err := publishedStorage.CreateFile(filepath.Join(basePath, "Release"+suffix))
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	if suffix != "" {
		renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
	}

	bufWriter := bufio.NewWriter(releaseFile)
	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFilename := releaseFile.Name()
	releaseFile.Close()

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
			renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
		}
	}

	for oldName, newName := range renameMap {
		err = publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}
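// Each generated index lands in the top-level Release file via the
// " %s %8d %s\n" format used above: a leading space, the checksum, the size
// right-aligned in eight columns, then the path relative to
// dists/<distribution>/. A standalone illustration with hypothetical values:
package main

import "fmt"

func main() {
	md5sum := "d41d8cd98f00b204e9800998ecf8427e" // hypothetical checksum
	size := int64(1234)
	path := "main/binary-amd64/Packages"

	fmt.Printf(" %s %8d %s\n", md5sum, size, path)
	// prints: " d41d8cd98f00b204e9800998ecf8427e     1234 main/binary-amd64/Packages"
}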
func aptlyRepoAdd(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 2 {
		cmd.Usage()
		return err
	}

	name := args[0]

	verifier := &utils.GpgVerifier{}

	localRepoCollection := debian.NewLocalRepoCollection(context.database)
	repo, err := localRepoCollection.ByName(name)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	err = localRepoCollection.LoadComplete(repo)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	context.progress.Printf("Loading packages...\n")

	packageCollection := debian.NewPackageCollection(context.database)
	list, err := debian.NewPackageListFromRefList(repo.RefList(), packageCollection)
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	packageFiles := []string{}

	for _, location := range args[1:] {
		info, err := os.Stat(location)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to process %s: %s@|", location, err)
			continue
		}
		if info.IsDir() {
			err = filepath.Walk(location, func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				if info.IsDir() {
					return nil
				}

				if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
					packageFiles = append(packageFiles, path)
				}

				return nil
			})
		} else {
			if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
				packageFiles = append(packageFiles, location)
			} else {
				context.progress.ColoredPrintf("@y[!]@| @!Unknown file extension: %s@|", location)
				continue
			}
		}
	}

	processedFiles := []string{}

	sort.Strings(packageFiles)

	for _, file := range packageFiles {
		var (
			stanza debian.Stanza
			err    error
			p      *debian.Package
		)

		isSourcePackage := strings.HasSuffix(file, ".dsc")

		if isSourcePackage {
			stanza, err = debian.GetControlFileFromDsc(file, verifier)

			if err == nil {
				stanza["Package"] = stanza["Source"]
				delete(stanza, "Source")

				p, err = debian.NewSourcePackageFromControlFile(stanza)
			}
		} else {
			stanza, err = debian.GetControlFileFromDeb(file)
			p = debian.NewPackageFromControlFile(stanza)
		}

		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to read file %s: %s@|", file, err)
			continue
		}

		checksums, err := utils.ChecksumsForFile(file)
		if err != nil {
			return err
		}

		if isSourcePackage {
			p.Files = append(p.Files, debian.PackageFile{Filename: filepath.Base(file), Checksums: checksums})
		} else {
			p.Files = []debian.PackageFile{{Filename: filepath.Base(file), Checksums: checksums}}
		}

		err = context.packagePool.Import(file, checksums.MD5)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", file, err)
			continue
		}

		processedFiles = append(processedFiles, file)

		// go over all files, except for the last one (.dsc/.deb itself)
		for i := 0; i < len(p.Files)-1; i++ {
			sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(p.Files[i].Filename))

			err = context.packagePool.Import(sourceFile, p.Files[i].Checksums.MD5)
			if err != nil {
				context.progress.ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", sourceFile, err)
				break
			}

			processedFiles = append(processedFiles, sourceFile)
		}

		if err != nil {
			// some files haven't been imported
			continue
		}

		err = packageCollection.Update(p)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to save package %s: %s@|", p, err)
			continue
		}

		err = list.Add(p)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to add package to repo %s: %s@|", p, err)
			continue
		}

		context.progress.ColoredPrintf("@g[+]@| %s added@|", p)
	}

	repo.UpdateRefList(debian.NewPackageRefListFromPackageList(list))

	err = localRepoCollection.Update(repo)
	if err != nil {
		return fmt.Errorf("unable to save: %s", err)
	}

	if cmd.Flag.Lookup("remove-files").Value.Get().(bool) {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	return err
}
func aptlyRepoAdd(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 2 {
		cmd.Usage()
		return commander.ErrCommandError
	}

	name := args[0]

	verifier := &utils.GpgVerifier{}

	repo, err := context.CollectionFactory().LocalRepoCollection().ByName(name)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	err = context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	context.Progress().Printf("Loading packages...\n")

	list, err := deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), context.Progress())
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	forceReplace := context.Flags().Lookup("force-replace").Value.Get().(bool)

	var packageFiles, failedFiles []string

	packageFiles, failedFiles = deb.CollectPackageFiles(args[1:], &aptly.ConsoleResultReporter{Progress: context.Progress()})

	var processedFiles, failedFiles2 []string

	processedFiles, failedFiles2, err = deb.ImportPackageFiles(list, packageFiles, forceReplace, verifier, context.PackagePool(),
		context.CollectionFactory().PackageCollection(), &aptly.ConsoleResultReporter{Progress: context.Progress()}, nil)
	failedFiles = append(failedFiles, failedFiles2...)
	if err != nil {
		return fmt.Errorf("unable to import package files: %s", err)
	}

	repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

	err = context.CollectionFactory().LocalRepoCollection().Update(repo)
	if err != nil {
		return fmt.Errorf("unable to save: %s", err)
	}

	if context.Flags().Lookup("remove-files").Value.Get().(bool) {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	if len(failedFiles) > 0 {
		context.Progress().ColoredPrintf("@y[!]@| @!Some files were skipped due to errors:@|")
		for _, file := range failedFiles {
			context.Progress().ColoredPrintf(" %s", file)
		}

		return fmt.Errorf("some files failed to be added")
	}

	return err
}
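// deb.CollectPackageFiles, used above, replaces the inline os.Stat/filepath.Walk
// logic of the older variant that follows. A minimal sketch of such a collector,
// inferred from the call sites here (an assumption, not the real implementation):
// gather *.deb / *.dsc paths under each location, recording locations that
// cannot be processed as failed.
package sketch

import (
	"os"
	"path/filepath"
	"sort"
	"strings"
)

func collectPackageFilesSketch(locations []string) (packageFiles, failedFiles []string) {
	for _, location := range locations {
		info, err := os.Stat(location)
		if err != nil {
			failedFiles = append(failedFiles, location)
			continue
		}

		if info.IsDir() {
			// recurse into directories, picking up package files at any depth
			filepath.Walk(location, func(path string, fi os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				if !fi.IsDir() && (strings.HasSuffix(path, ".deb") || strings.HasSuffix(path, ".dsc")) {
					packageFiles = append(packageFiles, path)
				}
				return nil
			})
		} else if strings.HasSuffix(location, ".deb") || strings.HasSuffix(location, ".dsc") {
			packageFiles = append(packageFiles, location)
		} else {
			failedFiles = append(failedFiles, location)
		}
	}

	sort.Strings(packageFiles) // stable processing order, as in the variants here

	return packageFiles, failedFiles
}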
func aptlyRepoAdd(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 2 {
		cmd.Usage()
		return commander.ErrCommandError
	}

	name := args[0]

	verifier := &utils.GpgVerifier{}

	repo, err := context.CollectionFactory().LocalRepoCollection().ByName(name)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	err = context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	context.Progress().Printf("Loading packages...\n")

	list, err := deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), context.Progress())
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	packageFiles := []string{}
	failedFiles := []string{}

	for _, location := range args[1:] {
		info, err2 := os.Stat(location)
		if err2 != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to process %s: %s@|", location, err2)
			failedFiles = append(failedFiles, location)
			continue
		}
		if info.IsDir() {
			err2 = filepath.Walk(location, func(path string, info os.FileInfo, err3 error) error {
				if err3 != nil {
					return err3
				}
				if info.IsDir() {
					return nil
				}

				if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
					packageFiles = append(packageFiles, path)
				}

				return nil
			})
		} else {
			if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
				packageFiles = append(packageFiles, location)
			} else {
				context.Progress().ColoredPrintf("@y[!]@| @!Unknown file extension: %s@|", location)
				failedFiles = append(failedFiles, location)
				continue
			}
		}
	}

	processedFiles := []string{}

	sort.Strings(packageFiles)

	for _, file := range packageFiles {
		var (
			stanza deb.Stanza
			p      *deb.Package
		)

		candidateProcessedFiles := []string{}

		isSourcePackage := strings.HasSuffix(file, ".dsc")

		if isSourcePackage {
			stanza, err = deb.GetControlFileFromDsc(file, verifier)

			if err == nil {
				stanza["Package"] = stanza["Source"]
				delete(stanza, "Source")

				p, err = deb.NewSourcePackageFromControlFile(stanza)
			}
		} else {
			stanza, err = deb.GetControlFileFromDeb(file)
			p = deb.NewPackageFromControlFile(stanza)
		}

		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to read file %s: %s@|", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		var checksums utils.ChecksumInfo
		checksums, err = utils.ChecksumsForFile(file)
		if err != nil {
			return err
		}

		if isSourcePackage {
			p.UpdateFiles(append(p.Files(), deb.PackageFile{Filename: filepath.Base(file), Checksums: checksums}))
		} else {
			p.UpdateFiles([]deb.PackageFile{{Filename: filepath.Base(file), Checksums: checksums}})
		}

		err = context.PackagePool().Import(file, checksums.MD5)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		candidateProcessedFiles = append(candidateProcessedFiles, file)

		// go over all files, except for the last one (.dsc/.deb itself)
		for _, f := range p.Files() {
			if filepath.Base(f.Filename) == filepath.Base(file) {
				continue
			}

			sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(f.Filename))

			err = context.PackagePool().Import(sourceFile, f.Checksums.MD5)
			if err != nil {
				context.Progress().ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", sourceFile, err)
				failedFiles = append(failedFiles, file)
				break
			}

			candidateProcessedFiles = append(candidateProcessedFiles, sourceFile)
		}

		if err != nil {
			// some files haven't been imported
			continue
		}

		err = context.CollectionFactory().PackageCollection().Update(p)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to save package %s: %s@|", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		err = list.Add(p)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to add package to repo %s: %s@|", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		context.Progress().ColoredPrintf("@g[+]@| %s added@|", p)
		processedFiles = append(processedFiles, candidateProcessedFiles...)
	}

	repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

	err = context.CollectionFactory().LocalRepoCollection().Update(repo)
	if err != nil {
		return fmt.Errorf("unable to save: %s", err)
	}

	if context.flags.Lookup("remove-files").Value.Get().(bool) {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	if len(failedFiles) > 0 {
		context.Progress().ColoredPrintf("@y[!]@| @!Some files were skipped due to errors:@|")
		for _, file := range failedFiles {
			context.Progress().ColoredPrintf(" %s", file)
		}

		return fmt.Errorf("some files failed to be added")
	}

	return err
}