Example #1
File: changes.go Project: taku-k/aptly
// Prepare creates temporary directory, copies file there and verifies checksums
func (c *Changes) Prepare() error {
	var err error

	for _, file := range c.Files {
		if filepath.Dir(file.Filename) != "." {
			return fmt.Errorf("file is not in the same folder as .changes file: %s", file.Filename)
		}

		file.Filename = filepath.Base(file.Filename)

		err = utils.CopyFile(filepath.Join(c.BasePath, file.Filename), filepath.Join(c.TempDir, file.Filename))
		if err != nil {
			return err
		}
	}

	for _, file := range c.Files {
		var info utils.ChecksumInfo

		info, err = utils.ChecksumsForFile(filepath.Join(c.TempDir, file.Filename))
		if err != nil {
			return err
		}

		if info.Size != file.Checksums.Size {
			return fmt.Errorf("size mismatch: expected %v != obtained %v", file.Checksums.Size, info.Size)
		}

		if info.MD5 != file.Checksums.MD5 {
			return fmt.Errorf("checksum mismatch MD5: expected %v != obtained %v", file.Checksums.MD5, info.MD5)
		}

		if info.SHA1 != file.Checksums.SHA1 {
			return fmt.Errorf("checksum mismatch SHA1: expected %v != obtained %v", file.Checksums.SHA1, info.SHA1)
		}

		if info.SHA256 != file.Checksums.SHA256 {
			return fmt.Errorf("checksum mismatch SHA256 expected %v != obtained %v", file.Checksums.SHA256, info.SHA256)
		}
	}

	return nil
}
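All of the examples on this page call utils.ChecksumsForFile, whose implementation is not shown here. Below is a minimal sketch of what such a helper could look like, based only on the fields that Prepare() above verifies (Size, MD5, SHA1, SHA256); it is an assumption for illustration, not aptly's actual code.

// Sketch of a ChecksumsForFile-style helper (assumption, not aptly's implementation):
// it reads the file once and fills in the fields checked by Prepare() above.
package utils

import (
	"crypto/md5"
	"crypto/sha1"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"os"
)

// ChecksumInfo mirrors the fields used throughout the examples on this page.
type ChecksumInfo struct {
	Size   int64
	MD5    string
	SHA1   string
	SHA256 string
}

// ChecksumsForFile computes the size and all three digests in a single pass.
func ChecksumsForFile(path string) (ChecksumInfo, error) {
	f, err := os.Open(path)
	if err != nil {
		return ChecksumInfo{}, err
	}
	defer f.Close()

	md5h, sha1h, sha256h := md5.New(), sha1.New(), sha256.New()
	size, err := io.Copy(io.MultiWriter(md5h, sha1h, sha256h), f)
	if err != nil {
		return ChecksumInfo{}, err
	}

	return ChecksumInfo{
		Size:   size,
		MD5:    hex.EncodeToString(md5h.Sum(nil)),
		SHA1:   hex.EncodeToString(sha1h.Sum(nil)),
		SHA256: hex.EncodeToString(sha256h.Sum(nil)),
	}, nil
}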
Example #2
File: public.go Project: hdonnay/aptly
// ChecksumsForFile proxies requests to utils.ChecksumsForFile, joining public path
func (storage *PublishedStorage) ChecksumsForFile(path string) (utils.ChecksumInfo, error) {
	return utils.ChecksumsForFile(filepath.Join(storage.rootPath, path))
}
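The wrapper above (and the similar one in Example #4) only prefixes the storage root before delegating to utils.ChecksumsForFile. A self-contained illustration of that path joining, with made-up paths:

// Illustrates the filepath.Join prefixing done by the ChecksumsForFile wrappers;
// the root path and relative path here are made up for the example.
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	rootPath := "/srv/aptly/public"    // hypothetical storage root
	relPath := "dists/squeeze/Release" // hypothetical published file
	fmt.Println(filepath.Join(rootPath, relPath))
	// prints: /srv/aptly/public/dists/squeeze/Release
}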
Example #3
func (file *indexFile) Finalize(signer utils.Signer) error {
	if file.w == nil {
		if file.discardable {
			return nil
		}
		file.BufWriter()
	}

	err := file.w.Flush()
	if err != nil {
		file.tempFile.Close()
		return fmt.Errorf("unable to write to index file: %s", err)
	}

	if file.compressable {
		err = utils.CompressFile(file.tempFile)
		if err != nil {
			file.tempFile.Close()
			return fmt.Errorf("unable to compress index file: %s", err)
		}
	}

	file.tempFile.Close()

	exts := []string{""}
	if file.compressable {
		exts = append(exts, ".gz", ".bz2")
		if file.onlyGzip {
			exts = []string{".gz"}
		}
	}

	for _, ext := range exts {
		var checksumInfo utils.ChecksumInfo

		checksumInfo, err = utils.ChecksumsForFile(file.tempFilename + ext)
		if err != nil {
			return fmt.Errorf("unable to collect checksums: %s", err)
		}
		file.parent.generatedFiles[file.relativePath+ext] = checksumInfo
	}

	err = file.parent.publishedStorage.MkDir(filepath.Dir(filepath.Join(file.parent.basePath, file.relativePath)))
	if err != nil {
		return fmt.Errorf("unable to create dir: %s", err)
	}

	for _, ext := range exts {
		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext),
			file.tempFilename+ext)
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		if file.parent.suffix != "" {
			file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+ext)] =
				filepath.Join(file.parent.basePath, file.relativePath+ext)
		}
	}

	if file.signable && signer != nil {
		err = signer.DetachedSign(file.tempFilename, file.tempFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to detached sign file: %s", err)
		}

		err = signer.ClearSign(file.tempFilename, filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
		if err != nil {
			return fmt.Errorf("unable to clearsign file: %s", err)
		}

		if file.parent.suffix != "" {
			file.parent.renameMap[filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg")] =
				filepath.Join(file.parent.basePath, file.relativePath+".gpg")
			file.parent.renameMap[filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix)] =
				filepath.Join(file.parent.basePath, "In"+file.relativePath)
		}

		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, file.relativePath+file.parent.suffix+".gpg"),
			file.tempFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		err = file.parent.publishedStorage.PutFile(filepath.Join(file.parent.basePath, "In"+file.relativePath+file.parent.suffix),
			filepath.Join(filepath.Dir(file.tempFilename), "In"+filepath.Base(file.tempFilename)))
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}
	}

	return nil
}
Example #4
// ChecksumsForFile proxies requests to utils.ChecksumsForFile, joining public path
func (r *Repository) ChecksumsForFile(path string) (utils.ChecksumInfo, error) {
	return utils.ChecksumsForFile(filepath.Join(r.RootPath, "public", path))
}
Example #5
// Publish publishes snapshot (repository) contents, links package files, generates Packages & Release files, signs them
func (p *PublishedRepo) Publish(packagePool aptly.PackagePool, publishedStorageProvider aptly.PublishedStorageProvider,
	collectionFactory *CollectionFactory, signer utils.Signer, progress aptly.Progress, forceOverwrite bool) error {
	publishedStorage := publishedStorageProvider.GetPublishedStorage(p.Storage)

	err := publishedStorage.MkDir(filepath.Join(p.Prefix, "pool"))
	if err != nil {
		return err
	}
	basePath := filepath.Join(p.Prefix, "dists", p.Distribution)
	err = publishedStorage.MkDir(basePath)
	if err != nil {
		return err
	}

	if progress != nil {
		progress.Printf("Loading packages...\n")
	}

	lists := map[string]*PackageList{}

	for component := range p.sourceItems {
		// Load all packages
		lists[component], err = NewPackageListFromRefList(p.RefList(component), collectionFactory.PackageCollection(), progress)
		if err != nil {
			return fmt.Errorf("unable to load packages: %s", err)
		}
	}

	if !p.rePublishing {
		if len(p.Architectures) == 0 {
			for _, list := range lists {
				p.Architectures = append(p.Architectures, list.Architectures(true)...)
			}
		}

		if len(p.Architectures) == 0 {
			return fmt.Errorf("unable to figure out list of architectures, please supply explicit list")
		}

		sort.Strings(p.Architectures)
		p.Architectures = utils.StrSliceDeduplicate(p.Architectures)
	}

	var suffix string
	if p.rePublishing {
		suffix = ".tmp"
	}

	generatedFiles := map[string]utils.ChecksumInfo{}
	renameMap := map[string]string{}

	if progress != nil {
		progress.Printf("Generating metadata files and linking package files...\n")
	}

	var tempDir string
	tempDir, err = ioutil.TempDir(os.TempDir(), "aptly")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tempDir)

	for component, list := range lists {
		var relativePath string

		// For all architectures, generate packages/sources files
		for _, arch := range p.Architectures {
			if progress != nil {
				progress.InitBar(int64(list.Len()), false)
			}

			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Sources")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Packages")
			}
			err = publishedStorage.MkDir(filepath.Dir(filepath.Join(basePath, relativePath)))
			if err != nil {
				return err
			}

			var packagesFile *os.File

			packagesFileName := filepath.Join(tempDir, fmt.Sprintf("pkgs_%s_%s", component, arch))
			packagesFile, err = os.Create(packagesFileName)
			if err != nil {
				return fmt.Errorf("unable to create temporary Packages file: %s", err)
			}

			bufWriter := bufio.NewWriter(packagesFile)

			err = list.ForEach(func(pkg *Package) error {
				if progress != nil {
					progress.AddBar(1)
				}
				if pkg.MatchesArchitecture(arch) {
					err = pkg.LinkFromPool(publishedStorage, packagePool, p.Prefix, component, forceOverwrite)
					if err != nil {
						return err
					}

					err = pkg.Stanza().WriteTo(bufWriter)
					if err != nil {
						return err
					}
					err = bufWriter.WriteByte('\n')
					if err != nil {
						return err
					}

					pkg.files = nil
					pkg.deps = nil
					pkg.extra = nil

				}

				return nil
			})

			if err != nil {
				return fmt.Errorf("unable to process packages: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to write Packages file: %s", err)
			}

			err = utils.CompressFile(packagesFile)
			if err != nil {
				return fmt.Errorf("unable to compress Packages files: %s", err)
			}

			packagesFile.Close()

			for _, ext := range []string{"", ".gz", ".bz2"} {
				var checksumInfo utils.ChecksumInfo

				checksumInfo, err = utils.ChecksumsForFile(packagesFileName + ext)
				if err != nil {
					return fmt.Errorf("unable to collect checksums: %s", err)
				}
				generatedFiles[relativePath+ext] = checksumInfo

				err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix+ext), packagesFileName+ext)
				if err != nil {
					return fmt.Errorf("unable to publish file: %s", err)
				}

				if suffix != "" {
					renameMap[filepath.Join(basePath, relativePath+suffix+ext)] = filepath.Join(basePath, relativePath+ext)
				}
			}

			if progress != nil {
				progress.ShutdownBar()
			}
		}

		// For all architectures, generate Release files
		for _, arch := range p.Architectures {
			release := make(Stanza)
			release["Archive"] = p.Distribution
			release["Architecture"] = arch
			release["Component"] = component
			release["Origin"] = p.GetOrigin()
			release["Label"] = p.GetLabel()

			if arch == "source" {
				relativePath = filepath.Join(component, "source", "Release")
			} else {
				relativePath = filepath.Join(component, fmt.Sprintf("binary-%s", arch), "Release")
			}

			var file *os.File

			fileName := filepath.Join(tempDir, fmt.Sprintf("release_%s_%s", component, arch))
			file, err = os.Create(fileName)
			if err != nil {
				return fmt.Errorf("unable to create temporary Release file: %s", err)
			}

			bufWriter := bufio.NewWriter(file)

			err = release.WriteTo(bufWriter)
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			err = bufWriter.Flush()
			if err != nil {
				return fmt.Errorf("unable to create Release file: %s", err)
			}

			file.Close()

			var checksumInfo utils.ChecksumInfo
			checksumInfo, err = utils.ChecksumsForFile(fileName)
			if err != nil {
				return fmt.Errorf("unable to collect checksums: %s", err)
			}
			generatedFiles[relativePath] = checksumInfo

			err = publishedStorage.PutFile(filepath.Join(basePath, relativePath+suffix), fileName)
			if err != nil {
				return fmt.Errorf("unable to publish file: %s", err)
			}

			if suffix != "" {
				renameMap[filepath.Join(basePath, relativePath+suffix)] = filepath.Join(basePath, relativePath)
			}

		}
	}

	release := make(Stanza)
	release["Origin"] = p.GetOrigin()
	release["Label"] = p.GetLabel()
	release["Codename"] = p.Distribution
	release["Date"] = time.Now().UTC().Format("Mon, 2 Jan 2006 15:04:05 MST")
	release["Architectures"] = strings.Join(utils.StrSlicesSubstract(p.Architectures, []string{"source"}), " ")
	release["Description"] = " Generated by aptly\n"
	release["MD5Sum"] = "\n"
	release["SHA1"] = "\n"
	release["SHA256"] = "\n"

	release["Components"] = strings.Join(p.Components(), " ")

	for path, info := range generatedFiles {
		release["MD5Sum"] += fmt.Sprintf(" %s %8d %s\n", info.MD5, info.Size, path)
		release["SHA1"] += fmt.Sprintf(" %s %8d %s\n", info.SHA1, info.Size, path)
		release["SHA256"] += fmt.Sprintf(" %s %8d %s\n", info.SHA256, info.Size, path)
	}

	var releaseFile *os.File
	releaseFilename := filepath.Join(tempDir, "Release")
	releaseFile, err = os.Create(releaseFilename)
	if err != nil {
		return fmt.Errorf("unable to create temporary Release file: %s", err)
	}

	bufWriter := bufio.NewWriter(releaseFile)

	err = release.WriteTo(bufWriter)
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	err = bufWriter.Flush()
	if err != nil {
		return fmt.Errorf("unable to create Release file: %s", err)
	}

	releaseFile.Close()

	if suffix != "" {
		renameMap[filepath.Join(basePath, "Release"+suffix)] = filepath.Join(basePath, "Release")
	}

	err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix), releaseFilename)
	if err != nil {
		return fmt.Errorf("unable to publish file: %s", err)
	}

	// Signing files might output to console, so flush progress writer first
	if progress != nil {
		progress.Flush()
	}

	if signer != nil {
		err = signer.DetachedSign(releaseFilename, releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		err = signer.ClearSign(releaseFilename, filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to sign Release file: %s", err)
		}

		if suffix != "" {
			renameMap[filepath.Join(basePath, "Release"+suffix+".gpg")] = filepath.Join(basePath, "Release.gpg")
			renameMap[filepath.Join(basePath, "InRelease"+suffix)] = filepath.Join(basePath, "InRelease")
		}

		err = publishedStorage.PutFile(filepath.Join(basePath, "Release"+suffix+".gpg"), releaseFilename+".gpg")
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}

		err = publishedStorage.PutFile(filepath.Join(basePath, "InRelease"+suffix),
			filepath.Join(filepath.Dir(releaseFilename), "InRelease"+suffix))
		if err != nil {
			return fmt.Errorf("unable to publish file: %s", err)
		}
	}

	for oldName, newName := range renameMap {
		err = publishedStorage.RenameFile(oldName, newName)
		if err != nil {
			return fmt.Errorf("unable to rename: %s", err)
		}
	}

	return nil
}
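The Release stanza assembled near the end of Publish() lists every generated index with its digest, size and relative path using the format string " %s %8d %s\n". A standalone illustration of that line layout (hash, size and path values are made up):

// Shows the checksum-line layout written into the Release file by Publish():
// leading space, digest, size right-aligned to 8 columns, relative path.
// All values below are made up for illustration.
package main

import "fmt"

func main() {
	md5sum := "d41d8cd98f00b204e9800998ecf8427e"
	size := int64(1234)
	relativePath := "main/binary-amd64/Packages"
	fmt.Printf(" %s %8d %s\n", md5sum, size, relativePath)
}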
Example #6
File: repo_add.go Project: hdonnay/aptly
func aptlyRepoAdd(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 2 {
		cmd.Usage()
		return err
	}

	name := args[0]

	verifier := &utils.GpgVerifier{}

	localRepoCollection := debian.NewLocalRepoCollection(context.database)
	repo, err := localRepoCollection.ByName(name)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	err = localRepoCollection.LoadComplete(repo)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	context.progress.Printf("Loading packages...\n")

	packageCollection := debian.NewPackageCollection(context.database)
	list, err := debian.NewPackageListFromRefList(repo.RefList(), packageCollection)
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	packageFiles := []string{}

	for _, location := range args[1:] {
		info, err := os.Stat(location)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to process %s: %s@|", location, err)
			continue
		}
		if info.IsDir() {
			err = filepath.Walk(location, func(path string, info os.FileInfo, err error) error {
				if err != nil {
					return err
				}
				if info.IsDir() {
					return nil
				}

				if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
					packageFiles = append(packageFiles, path)
				}

				return nil
			})
		} else {
			if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
				packageFiles = append(packageFiles, location)
			} else {
				context.progress.ColoredPrintf("@y[!]@| @!Unknwon file extenstion: %s@|", location)
				continue
			}
		}
	}

	processedFiles := []string{}
	sort.Strings(packageFiles)

	for _, file := range packageFiles {
		var (
			stanza debian.Stanza
			err    error
			p      *debian.Package
		)

		isSourcePackage := strings.HasSuffix(file, ".dsc")

		if isSourcePackage {
			stanza, err = debian.GetControlFileFromDsc(file, verifier)

			if err == nil {
				stanza["Package"] = stanza["Source"]
				delete(stanza, "Source")

				p, err = debian.NewSourcePackageFromControlFile(stanza)
			}
		} else {
			stanza, err = debian.GetControlFileFromDeb(file)
			p = debian.NewPackageFromControlFile(stanza)
		}
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to read file %s: %s@|", file, err)
			continue
		}

		checksums, err := utils.ChecksumsForFile(file)
		if err != nil {
			return err
		}

		if isSourcePackage {
			p.Files = append(p.Files, debian.PackageFile{Filename: filepath.Base(file), Checksums: checksums})
		} else {
			p.Files = []debian.PackageFile{debian.PackageFile{Filename: filepath.Base(file), Checksums: checksums}}
		}

		err = context.packagePool.Import(file, checksums.MD5)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", file, err)
			continue
		}

		processedFiles = append(processedFiles, file)

		// go over all files, except for the last one (.dsc/.deb itself)
		for i := 0; i < len(p.Files)-1; i++ {
			sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(p.Files[i].Filename))
			err = context.packagePool.Import(sourceFile, p.Files[i].Checksums.MD5)
			if err != nil {
				context.progress.ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", sourceFile, err)
				break
			}

			processedFiles = append(processedFiles, sourceFile)
		}
		if err != nil {
			// some files haven't been imported
			continue
		}

		err = packageCollection.Update(p)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to save package %s: %s@|", p, err)
			continue
		}

		err = list.Add(p)
		if err != nil {
			context.progress.ColoredPrintf("@y[!]@| @!Unable to add package to repo %s: %s@|", p, err)
			continue
		}

		context.progress.ColoredPrintf("@g[+]@| %s added@|", p)
	}

	repo.UpdateRefList(debian.NewPackageRefListFromPackageList(list))

	err = localRepoCollection.Update(repo)
	if err != nil {
		return fmt.Errorf("unable to save: %s", err)
	}

	if cmd.Flag.Lookup("remove-files").Value.Get().(bool) {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	return err
}
Example #7
File: import.go Project: liftup/aptly
// ImportPackageFiles imports files into local repository
func ImportPackageFiles(list *PackageList, packageFiles []string, forceReplace bool, verifier utils.Verifier,
	pool aptly.PackagePool, collection *PackageCollection, reporter aptly.ResultReporter, restriction PackageQuery) (processedFiles []string, failedFiles []string, err error) {
	if forceReplace {
		list.PrepareIndex()
	}

	for _, file := range packageFiles {
		var (
			stanza Stanza
			p      *Package
		)

		candidateProcessedFiles := []string{}
		isSourcePackage := strings.HasSuffix(file, ".dsc")
		isUdebPackage := strings.HasSuffix(file, ".udeb")

		if isSourcePackage {
			stanza, err = GetControlFileFromDsc(file, verifier)

			if err == nil {
				stanza["Package"] = stanza["Source"]
				delete(stanza, "Source")

				p, err = NewSourcePackageFromControlFile(stanza)
			}
		} else {
			stanza, err = GetControlFileFromDeb(file)
			if isUdebPackage {
				p = NewUdebPackageFromControlFile(stanza)
			} else {
				p = NewPackageFromControlFile(stanza)
			}
		}
		if err != nil {
			reporter.Warning("Unable to read file %s: %s", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		if p.Name == "" {
			reporter.Warning("Empty package name on %s", file)
			failedFiles = append(failedFiles, file)
			continue
		}

		if p.Version == "" {
			reporter.Warning("Empty version on %s", file)
			failedFiles = append(failedFiles, file)
			continue
		}

		if p.Architecture == "" {
			reporter.Warning("Empty architecture on %s", file)
			failedFiles = append(failedFiles, file)
			continue
		}

		var checksums utils.ChecksumInfo
		checksums, err = utils.ChecksumsForFile(file)
		if err != nil {
			return nil, nil, err
		}

		if isSourcePackage {
			p.UpdateFiles(append(p.Files(), PackageFile{Filename: filepath.Base(file), Checksums: checksums}))
		} else {
			p.UpdateFiles([]PackageFile{{Filename: filepath.Base(file), Checksums: checksums}})
		}

		err = pool.Import(file, checksums.MD5)
		if err != nil {
			reporter.Warning("Unable to import file %s into pool: %s", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		candidateProcessedFiles = append(candidateProcessedFiles, file)

		// go over all referenced files, skipping the .dsc/.deb file itself
		for _, f := range p.Files() {
			if filepath.Base(f.Filename) == filepath.Base(file) {
				continue
			}
			sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(f.Filename))
			err = pool.Import(sourceFile, f.Checksums.MD5)
			if err != nil {
				reporter.Warning("Unable to import file %s into pool: %s", sourceFile, err)
				failedFiles = append(failedFiles, file)
				break
			}

			candidateProcessedFiles = append(candidateProcessedFiles, sourceFile)
		}
		if err != nil {
			// some files haven't been imported
			continue
		}

		if restriction != nil && !restriction.Matches(p) {
			reporter.Warning("%s has been ignored as it doesn't match restriction", p)
			failedFiles = append(failedFiles, file)
			continue
		}

		err = collection.Update(p)
		if err != nil {
			reporter.Warning("Unable to save package %s: %s", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		if forceReplace {
			conflictingPackages := list.Search(Dependency{Pkg: p.Name, Version: p.Version, Relation: VersionEqual, Architecture: p.Architecture}, true)
			for _, cp := range conflictingPackages {
				reporter.Removed("%s removed due to conflict with package being added", cp)
				list.Remove(cp)
			}
		}

		err = list.Add(p)
		if err != nil {
			reporter.Warning("Unable to add package to repo %s: %s", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		reporter.Added("%s added", p)
		processedFiles = append(processedFiles, candidateProcessedFiles...)
	}

	err = nil
	return
}
Example #8
func aptlyRepoAdd(cmd *commander.Command, args []string) error {
	var err error
	if len(args) < 2 {
		cmd.Usage()
		return commander.ErrCommandError
	}

	name := args[0]

	verifier := &utils.GpgVerifier{}

	repo, err := context.CollectionFactory().LocalRepoCollection().ByName(name)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	err = context.CollectionFactory().LocalRepoCollection().LoadComplete(repo)
	if err != nil {
		return fmt.Errorf("unable to add: %s", err)
	}

	context.Progress().Printf("Loading packages...\n")

	list, err := deb.NewPackageListFromRefList(repo.RefList(), context.CollectionFactory().PackageCollection(), context.Progress())
	if err != nil {
		return fmt.Errorf("unable to load packages: %s", err)
	}

	packageFiles := []string{}
	failedFiles := []string{}

	for _, location := range args[1:] {
		info, err2 := os.Stat(location)
		if err2 != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to process %s: %s@|", location, err2)
			failedFiles = append(failedFiles, location)
			continue
		}
		if info.IsDir() {
			err2 = filepath.Walk(location, func(path string, info os.FileInfo, err3 error) error {
				if err3 != nil {
					return err3
				}
				if info.IsDir() {
					return nil
				}

				if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
					packageFiles = append(packageFiles, path)
				}

				return nil
			})
		} else {
			if strings.HasSuffix(info.Name(), ".deb") || strings.HasSuffix(info.Name(), ".dsc") {
				packageFiles = append(packageFiles, location)
			} else {
				context.Progress().ColoredPrintf("@y[!]@| @!Unknwon file extenstion: %s@|", location)
				failedFiles = append(failedFiles, location)
				continue
			}
		}
	}

	processedFiles := []string{}
	sort.Strings(packageFiles)

	for _, file := range packageFiles {
		var (
			stanza deb.Stanza
			p      *deb.Package
		)

		candidateProcessedFiles := []string{}
		isSourcePackage := strings.HasSuffix(file, ".dsc")

		if isSourcePackage {
			stanza, err = deb.GetControlFileFromDsc(file, verifier)

			if err == nil {
				stanza["Package"] = stanza["Source"]
				delete(stanza, "Source")

				p, err = deb.NewSourcePackageFromControlFile(stanza)
			}
		} else {
			stanza, err = deb.GetControlFileFromDeb(file)
			p = deb.NewPackageFromControlFile(stanza)
		}
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to read file %s: %s@|", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		var checksums utils.ChecksumInfo
		checksums, err = utils.ChecksumsForFile(file)
		if err != nil {
			return err
		}

		if isSourcePackage {
			p.UpdateFiles(append(p.Files(), deb.PackageFile{Filename: filepath.Base(file), Checksums: checksums}))
		} else {
			p.UpdateFiles([]deb.PackageFile{deb.PackageFile{Filename: filepath.Base(file), Checksums: checksums}})
		}

		err = context.PackagePool().Import(file, checksums.MD5)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", file, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		candidateProcessedFiles = append(candidateProcessedFiles, file)

		// go over all referenced files, skipping the .dsc/.deb file itself
		for _, f := range p.Files() {
			if filepath.Base(f.Filename) == filepath.Base(file) {
				continue
			}
			sourceFile := filepath.Join(filepath.Dir(file), filepath.Base(f.Filename))
			err = context.PackagePool().Import(sourceFile, f.Checksums.MD5)
			if err != nil {
				context.Progress().ColoredPrintf("@y[!]@| @!Unable to import file %s into pool: %s@|", sourceFile, err)
				failedFiles = append(failedFiles, file)
				break
			}

			candidateProcessedFiles = append(candidateProcessedFiles, sourceFile)
		}
		if err != nil {
			// some files haven't been imported
			continue
		}

		err = context.CollectionFactory().PackageCollection().Update(p)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to save package %s: %s@|", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		err = list.Add(p)
		if err != nil {
			context.Progress().ColoredPrintf("@y[!]@| @!Unable to add package to repo %s: %s@|", p, err)
			failedFiles = append(failedFiles, file)
			continue
		}

		context.Progress().ColoredPrintf("@g[+]@| %s added@|", p)
		processedFiles = append(processedFiles, candidateProcessedFiles...)
	}

	repo.UpdateRefList(deb.NewPackageRefListFromPackageList(list))

	err = context.CollectionFactory().LocalRepoCollection().Update(repo)
	if err != nil {
		return fmt.Errorf("unable to save: %s", err)
	}

	if context.flags.Lookup("remove-files").Value.Get().(bool) {
		processedFiles = utils.StrSliceDeduplicate(processedFiles)

		for _, file := range processedFiles {
			err := os.Remove(file)
			if err != nil {
				return fmt.Errorf("unable to remove file: %s", err)
			}
		}
	}

	if len(failedFiles) > 0 {
		context.Progress().ColoredPrintf("@y[!]@| @!Some files were skipped due to errors:@|")
		for _, file := range failedFiles {
			context.Progress().ColoredPrintf("  %s", file)
		}

		return fmt.Errorf("Some files failed to be added")
	}

	return err
}