// VendoredCleanUp cleans up vendored codebases after an update. // // This should _only_ be run for installations that do not want VCS repos inside // of the vendor/ directory. func VendoredCleanup(conf *cfg.Config) error { vend, err := gpath.Vendor() if err != nil { return err } for _, dep := range conf.Imports { if dep.UpdateAsVendored == true { msg.Info("Cleaning up vendored package %s\n", dep.Name) // Remove the VCS directory cwd := filepath.Join(vend, dep.Name) repo, err := dep.GetRepo(cwd) if err != nil { msg.Error("Error cleaning up %s:%s", dep.Name, err) continue } t := repo.Vcs() err = os.RemoveAll(cwd + string(os.PathSeparator) + "." + string(t)) if err != nil { msg.Error("Error cleaning up VCS dir for %s:%s", dep.Name, err) } } } return nil }
// Get fetches one or more dependencies and installs. // // This includes resolving dependency resolution and re-generating the lock file. func Get(names []string, installer *repo.Installer, insecure, skipRecursive bool) { base := gpath.Basepath() EnsureGopath() EnsureVendorDir() conf := EnsureConfig() glidefile, err := gpath.Glide() if err != nil { msg.Die("Could not find Glide file: %s", err) } // Add the packages to the config. if err := addPkgsToConfig(conf, names, insecure); err != nil { msg.Die("Failed to get new packages: %s", err) } // Fetch the new packages. Can't resolve versions via installer.Update if // get is called while the vendor/ directory is empty so we checkout // everything. installer.Checkout(conf, false) // Prior to resolving dependencies we need to start working with a clone // of the conf because we'll be making real changes to it. confcopy := conf.Clone() if !skipRecursive { // Get all repos and update them. // TODO: Can we streamline this in any way? The reason that we update all // of the dependencies is that we need to re-negotiate versions. For example, // if an existing dependency has the constraint >1.0 and this new package // adds the constraint <2.0, then this may re-resolve the existing dependency // to be between 1.0 and 2.0. But changing that dependency may then result // in that dependency's dependencies changing... so we sorta do the whole // thing to be safe. 
err = installer.Update(confcopy) if err != nil { msg.Die("Could not update packages: %s", err) } } // Set Reference if err := repo.SetReference(confcopy); err != nil { msg.Error("Failed to set references: %s", err) } // VendoredCleanup if installer.UpdateVendored { repo.VendoredCleanup(confcopy) } // Write YAML if err := conf.WriteFile(glidefile); err != nil { msg.Die("Failed to write glide YAML file: %s", err) } if !skipRecursive { // Write lock writeLock(conf, confcopy, base) } else { msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated") } }
// queueUnseenImports scans a package's imports and adds any new ones to the // processing queue. func (r *Resolver) queueUnseen(pkg string, queue *list.List) error { // A pkg is marked "seen" as soon as we have inspected it the first time. // Seen means that we have added all of its imports to the list. // Already queued indicates that we've either already put it into the queue // or intentionally not put it in the queue for fatal reasons (e.g. no // buildable source). deps, err := r.imports(pkg) if err != nil && !strings.HasPrefix(err.Error(), "no buildable Go source") { msg.Error("Could not find %s: %s", pkg, err) return err // NOTE: If we uncomment this, we get lots of "no buildable Go source" errors, // which don't ever seem to be helpful. They don't actually indicate an error // condition, and it's perfectly okay to run into that condition. //} else if err != nil { // msg.Warn(err.Error()) } for _, d := range deps { if _, ok := r.alreadyQ[d]; !ok { r.alreadyQ[d] = true queue.PushBack(d) } } return nil }
func (m *MissingPackageHandler) OnGopath(pkg string) (bool, error) { // If useGopath is false, we fall back to the strategy of fetching from // remote. if !m.useGopath { return m.NotFound(pkg) } root := util.GetRootFromPackage(pkg) // Skip any references to the root package. if root == m.RootPackage { return false, nil } msg.Info("Copying package %s from the GOPATH.", pkg) dest := filepath.Join(m.destination, pkg) // Find package on Gopath for _, gp := range gpath.Gopaths() { src := filepath.Join(gp, pkg) // FIXME: Should probably check if src is a dir or symlink. if _, err := os.Stat(src); err == nil { if err := os.MkdirAll(dest, os.ModeDir|0755); err != nil { return false, err } if err := gpath.CopyDir(src, dest); err != nil { return false, err } return true, nil } } msg.Error("Could not locate %s on the GOPATH, though it was found before.", pkg) return false, nil }
// EnsureGoVendor ensures that the Go version is correct. func EnsureGoVendor() { // 6l was removed in 1.5, when vendoring was introduced. cmd := exec.Command("go", "tool", "6l") if _, err := cmd.CombinedOutput(); err == nil { msg.Warn("You must install the Go 1.5 or greater toolchain to work with Glide.\n") os.Exit(1) } // This works with 1.5 and >=1.6. cmd = exec.Command("go", "env", "GO15VENDOREXPERIMENT") if out, err := cmd.CombinedOutput(); err != nil { msg.Error("Error looking for $GOVENDOREXPERIMENT: %s.\n", err) os.Exit(1) } else if strings.TrimSpace(string(out)) != "1" { msg.Warn("To use Glide, you must set GO15VENDOREXPERIMENT=1\n") os.Exit(1) } // Verify the setup isn't for the old version of glide. That is, this is // no longer assuming the _vendor directory as the GOPATH. Inform of // the change. if _, err := os.Stat("_vendor/"); err == nil { msg.Warn(`Your setup appears to be for the previous version of Glide. Previously, vendor packages were stored in _vendor/src/ and _vendor was set as your GOPATH. As of Go 1.5 the go tools recognize the vendor directory as a location for these files. Glide has embraced this. Please remove the _vendor directory or move the _vendor/src/ directory to vendor/.` + "\n") os.Exit(1) } }
// Process imports dependencies for a package func (d *VersionHandler) Process(pkg string) (e error) { root := util.GetRootFromPackage(pkg) // Skip any references to the root package. if root == d.RootPackage { return nil } // We have not tried to import, yet. // Should we look in places other than the root of the project? if d.Imported[root] == false { d.Imported[root] = true p := filepath.Join(d.Destination, root) f, deps, err := importer.Import(p) if f && err == nil { for _, dep := range deps { // The fist one wins. Would something smater than this be better? exists := d.Use.Get(dep.Name) if exists == nil && (dep.Reference != "" || dep.Repository != "") { d.Use.Add(dep.Name, dep) } } } else if err != nil { msg.Error("Unable to import from %s. Err: %s", root, err) e = err } } return }
// Plugin attempts to find and execute a plugin based on a command. // // Exit code 99 means the plugin was never executed. Code 1 means the program // exited badly. func Plugin(command string, args []string) { cwd, err := os.Getwd() if err != nil { msg.ExitCode(99) msg.Die("Could not get working directory: %s", err) } cmd := "glide-" + command var fullcmd string if fullcmd, err = exec.LookPath(cmd); err != nil { fullcmd = cwd + "/" + cmd if _, err := os.Stat(fullcmd); err != nil { msg.ExitCode(99) msg.Die("Command %s does not exist.", cmd) } } // Turning os.Args first argument from `glide` to `glide-command` args[0] = cmd // Removing the first argument (command) removed := false for i, v := range args { if removed == false && v == command { args = append(args[:i], args[i+1:]...) removed = true } } pa := os.ProcAttr{ Files: []*os.File{os.Stdin, os.Stdout, os.Stderr}, Dir: cwd, } msg.Debug("Delegating to plugin %s (%v)\n", fullcmd, args) proc, err := os.StartProcess(fullcmd, args, &pa) if err != nil { msg.Error("Failed to execute %s: %s", cmd, err) os.Exit(98) } if _, err := proc.Wait(); err != nil { msg.Error(err.Error()) os.Exit(1) } }
// NoVendor generates a list of source code directories, excepting `vendor/`. // // If "onlyGo" is true, only folders that have Go code in them will be returned. // // If suffix is true, this will append `/...` to every directory. func NoVendor(path string, onlyGo, suffix bool) { // This is responsible for printing the results of noVend. paths, err := noVend(path, onlyGo, suffix) if err != nil { msg.Error("Failed to walk file tree: %s", err) msg.Warn("FIXME: NoVendor should exit with non-zero exit code.") return } for _, p := range paths { msg.Puts(p) } }
// EnsureGopath fails if GOPATH is not set, or if $GOPATH/src is missing. // // Otherwise it returns the value of GOPATH. func EnsureGopath() string { gp := os.Getenv("GOPATH") if gp == "" { msg.Die("$GOPATH is not set.") } _, err := os.Stat(path.Join(gp, "src")) if err != nil { msg.Error("Could not find %s/src.\n", gp) msg.Info("As of Glide 0.5/Go 1.5, this is required.\n") msg.Die("Wihtout src, cannot continue. %s", err) } return gp }
func main() { app := cli.NewApp() app.Name = "glide" app.Usage = usage app.Version = version app.Flags = []cli.Flag{ cli.StringFlag{ Name: "yaml, y", Value: "glide.yaml", Usage: "Set a YAML configuration file.", }, cli.BoolFlag{ Name: "quiet, q", Usage: "Quiet (no info or debug messages)", }, cli.BoolFlag{ Name: "debug", Usage: "Print Debug messages (verbose)", }, cli.StringFlag{ Name: "home", Value: defaultGlideDir(), Usage: "The location of Glide files", EnvVar: "GLIDE_HOME", }, cli.BoolFlag{ Name: "no-color", Usage: "Turn off colored output for log messages", }, } app.CommandNotFound = func(c *cli.Context, command string) { // TODO: Set some useful env vars. action.Plugin(command, os.Args) } app.Before = startup app.Commands = commands() // Detect errors from the Before and After calls and exit on them. if err := app.Run(os.Args); err != nil { msg.Error(err.Error()) os.Exit(1) } // If there was a Error message exit non-zero. if msg.HasErrored() { m := msg.Color(msg.Red, "An Error has occured") msg.Msg(m) os.Exit(2) } }
// resolveList takes a list and resolves it. func (r *Resolver) resolveList(queue *list.List) ([]string, error) { var failedDep string for e := queue.Front(); e != nil; e = e.Next() { dep := e.Value.(string) //msg.Warn("#### %s ####", dep) //msg.Info("Seen Count: %d", len(r.seen)) // Catch the outtermost dependency. failedDep = dep err := filepath.Walk(dep, func(path string, fi os.FileInfo, err error) error { if err != nil && err != filepath.SkipDir { return err } // Skip files. if !fi.IsDir() { return nil } // Skip dirs that are not source. if !srcDir(fi) { //msg.Debug("Skip resource %s", fi.Name()) return filepath.SkipDir } // Anything that comes through here has already been through // the queue. r.alreadyQ[path] = true e := r.queueUnseen(path, queue) if err != nil { failedDep = path //msg.Error("Failed to fetch dependency %s: %s", path, err) } return e }) if err != nil && err != filepath.SkipDir { msg.Error("Dependency %s failed to resolve: %s.", failedDep, err) return []string{}, err } } res := make([]string, 0, queue.Len()) for e := queue.Front(); e != nil; e = e.Next() { res = append(res, e.Value.(string)) } return res, nil }
// Install installs a vendor directory based on an existing Glide configuration. func Install(installer *repo.Installer) { base := "." // Ensure GOPATH EnsureGopath() EnsureVendorDir() conf := EnsureConfig() // Lockfile exists if !gpath.HasLock(base) { msg.Info("Lock file (glide.lock) does not exist. Performing update.") Update(installer, false) return } // Load lockfile lock, err := LoadLockfile(base, conf) if err != nil { msg.Die("Could not load lockfile.") } // Delete unused packages if installer.DeleteUnused { // It's unclear whether this should operate off of the lock, or off // of the glide.yaml file. I'd think that doing this based on the // lock would be much more reliable. dependency.DeleteUnused(conf) } // Install newConf, err := installer.Install(lock, conf) if err != nil { msg.Die("Failed to install: %s", err) } msg.Info("Setting references.") // Set reference if err := repo.SetReference(newConf); err != nil { msg.Error("Failed to set references: %s (Skip to cleanup)", err) } // VendoredCleanup. This should ONLY be run if UpdateVendored was specified. if installer.UpdateVendored { repo.VendoredCleanup(newConf) } }
// EnsureGopath fails if GOPATH is not set, or if $GOPATH/src is missing. // // Otherwise it returns the value of GOPATH. func EnsureGopath() string { gps := gpath.Gopaths() if len(gps) == 0 { msg.Die("$GOPATH is not set.") } for _, gp := range gps { _, err := os.Stat(path.Join(gp, "src")) if err != nil { msg.Warn("%s", err) continue } return gp } msg.Error("Could not find any of %s/src.\n", strings.Join(gps, "/src, ")) msg.Info("As of Glide 0.5/Go 1.5, this is required.\n") msg.Die("Wihtout src, cannot continue.") return "" }
func Remove(packages []string, inst *repo.Installer) { base := gpath.Basepath() EnsureGopath() EnsureVendorDir() conf := EnsureConfig() glidefile, err := gpath.Glide() if err != nil { msg.Die("Could not find Glide file: %s", err) } msg.Info("Preparing to remove %d packages.", len(packages)) conf.Imports = rmDeps(packages, conf.Imports) conf.DevImports = rmDeps(packages, conf.DevImports) // Copy used to generate locks. confcopy := conf.Clone() confcopy.Imports = inst.List(confcopy) if err := repo.SetReference(confcopy); err != nil { msg.Error("Failed to set references: %s", err) } // TODO: Right now, there is no flag to enable this, so this will never be // run. I am not sure whether we should allow this in a rm op or not. if inst.UpdateVendored { repo.VendoredCleanup(confcopy) } // Write glide.yaml if err := conf.WriteFile(glidefile); err != nil { msg.Die("Failed to write glide YAML file: %s", err) } // Write glide lock writeLock(conf, confcopy, base) }
// Update updates repos and the lock file from the main glide yaml. func Update(installer *repo.Installer, skipRecursive bool) { base := "." EnsureGopath() EnsureVendorDir() conf := EnsureConfig() // Delete unused packages if installer.DeleteUnused { dependency.DeleteUnused(conf) } // Try to check out the initial dependencies. if err := installer.Checkout(conf, false); err != nil { msg.Die("Failed to do initial checkout of config: %s", err) } // Set the versions for the initial dependencies so that resolved dependencies // are rooted in the correct version of the base. if err := repo.SetReference(conf); err != nil { msg.Die("Failed to set initial config references: %s", err) } // Prior to resolving dependencies we need to start working with a clone // of the conf because we'll be making real changes to it. confcopy := conf.Clone() if !skipRecursive { // Get all repos and update them. err := installer.Update(confcopy) if err != nil { msg.Die("Could not update packages: %s", err) } // TODO: There is no support here for importing Godeps, GPM, and GB files. // I think that all we really need to do now is hunt for these files, and then // roll their version numbers into the config file. // Set references. There may be no remaining references to set since the // installer set them as it went to make sure it parsed the right imports // from the right version of the package. msg.Info("Setting references for remaining imports") if err := repo.SetReference(confcopy); err != nil { msg.Error("Failed to set references: %s (Skip to cleanup)", err) } } // Vendored cleanup // VendoredCleanup. This should ONLY be run if UpdateVendored was specified. if installer.UpdateVendored { repo.VendoredCleanup(confcopy) } // Write glide.yaml (Why? Godeps/GPM/GB?) // I think we don't need to write a new Glide file because update should not // change anything important. 
It will just generate information about // transative dependencies, all of which belongs exclusively in the lock // file, not the glide.yaml file. // TODO(mattfarina): Detect when a new dependency has been added or removed // from the project. A removed dependency should warn and an added dependency // should be added to the glide.yaml file. See issue #193. if !skipRecursive { // Write lock hash, err := conf.Hash() if err != nil { msg.Die("Failed to generate config hash. Unable to generate lock file.") } lock := cfg.NewLockfile(confcopy.Imports, hash) if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil { msg.Error("Could not write lock file to %s: %s", base, err) return } msg.Info("Project relies on %d dependencies.", len(confcopy.Imports)) } else { msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated") } }
// imports gets all of the imports for a given package.
//
// If the package is in GOROOT, this will return an empty list (but not
// an error).
// If it cannot resolve the pkg, it will return an error.
func (r *Resolver) imports(pkg string) ([]string, error) {

	// If this pkg is marked seen, we don't scan it again.
	if _, ok := r.seen[pkg]; ok {
		msg.Debug("Already saw %s", pkg)
		return []string{}, nil
	}

	// FIXME: On error this should try to NotFound to the dependency, and then import
	// it again.
	p, err := r.BuildContext.ImportDir(pkg, 0)
	if err != nil {
		return []string{}, err
	}

	// It is okay to scan a package more than once. In some cases, this is
	// desirable because the package can change between scans (e.g. as a result
	// of a failed scan resolving the situation).
	msg.Debug("=> Scanning %s (%s)", p.ImportPath, pkg)
	r.seen[pkg] = true

	// Optimization: If it's in GOROOT, it has no imports worth scanning.
	if p.Goroot {
		return []string{}, nil
	}

	// We are only looking for dependencies in vendor. No root, cgo, etc.
	// buf accumulates the vendor-relative paths of resolvable imports.
	buf := []string{}
	for _, imp := range p.Imports {
		info := r.FindPkg(imp)
		switch info.Loc {
		case LocUnknown:
			// Do we resolve here?
			// Unknown location: delegate to the handler, which may fetch the
			// package into vendor/.
			found, err := r.Handler.NotFound(imp)
			if err != nil {
				msg.Error("Failed to fetch %s: %s", imp, err)
			}
			if found {
				buf = append(buf, filepath.Join(r.VendorDir, filepath.FromSlash(imp)))
				continue
			}
			// Not fetched: mark seen so it isn't re-attempted on later scans.
			r.seen[info.Path] = true
		case LocVendor:
			//msg.Debug("Vendored: %s", imp)
			buf = append(buf, info.Path)
		case LocGopath:
			// On GOPATH but not vendored: the handler may copy it into
			// vendor/ (see MissingPackageHandler.OnGopath).
			found, err := r.Handler.OnGopath(imp)
			if err != nil {
				msg.Error("Failed to fetch %s: %s", imp, err)
			}
			// If the Handler marks this as found, we drop it into the buffer
			// for subsequent processing. Otherwise, we assume that we're
			// in a less-than-perfect, but functional, situation.
			if found {
				buf = append(buf, filepath.Join(r.VendorDir, filepath.FromSlash(imp)))
				continue
			}
			msg.Warn("Package %s is on GOPATH, but not vendored. Ignoring.", imp)
			r.seen[info.Path] = true
		default:
			// Local packages are an odd case. CGO cannot be scanned.
			msg.Debug("===> Skipping %s", imp)
		}
	}

	return buf, nil
}
// ResolveLocal resolves dependencies for the current project.
//
// This begins with the project, builds up a list of external dependencies.
//
// If the deep flag is set to true, this will then resolve all of the dependencies
// of the dependencies it has found. If not, it will return just the packages that
// the base project relies upon.
func (r *Resolver) ResolveLocal(deep bool) ([]string, error) {
	// We build a list of local source to walk, then send this list
	// to resolveList.
	l := list.New()
	// Guards against queueing the same import path more than once.
	alreadySeen := map[string]bool{}
	err := filepath.Walk(r.basedir, func(path string, fi os.FileInfo, err error) error {
		if err != nil && err != filepath.SkipDir {
			return err
		}
		// Skip files; only directories are import-scanned.
		if !fi.IsDir() {
			return nil
		}
		// Skip non-source directories (and everything beneath them).
		if !srcDir(fi) {
			return filepath.SkipDir
		}

		// Scan for dependencies, and anything that's not part of the local
		// package gets added to the scan list.
		p, err := r.BuildContext.ImportDir(path, 0)
		if err != nil {
			// Directories without Go files are fine to skip silently;
			// anything else aborts the walk.
			if strings.HasPrefix(err.Error(), "no buildable Go source") {
				return nil
			}
			return err
		}

		// We are only looking for dependencies in vendor. No root, cgo, etc.
		for _, imp := range p.Imports {
			if alreadySeen[imp] {
				continue
			}
			alreadySeen[imp] = true
			info := r.FindPkg(imp)
			switch info.Loc {
			case LocUnknown, LocVendor:
				// Queue the vendor-relative location for the import.
				l.PushBack(filepath.Join(r.VendorDir, filepath.FromSlash(imp))) // Do we need a path on this?
			case LocGopath:
				if !strings.HasPrefix(info.Path, r.basedir) {
					// FIXME: This is a package outside of the project we're
					// scanning. It should really be on vendor. But we don't
					// want it to reference GOPATH. We want it to be detected
					// and moved.
					l.PushBack(filepath.Join(r.VendorDir, filepath.FromSlash(imp)))
				}
			}
		}

		return nil
	})

	if err != nil {
		msg.Error("Failed to build an initial list of packages to scan: %s", err)
		return []string{}, err
	}

	if deep {
		return r.resolveList(l)
	}

	// If we're not doing a deep scan, we just convert the list into an
	// array and return.
	res := make([]string, 0, l.Len())
	for e := l.Front(); e != nil; e = e.Next() {
		res = append(res, e.Value.(string))
	}
	return res, nil
}
// VcsUpdate updates to a particular checkout based on the VCS setting.
//
// Behavior summary (from the code below):
//   - Pinned dependencies are skipped entirely.
//   - A missing destination triggers an initial checkout via VcsGet.
//   - A non-empty destination with no detectable VCS is treated as a
//     vendored package; it is only replaced when updateVendored is set.
//   - A wrong VCS type/remote is only replaced when force is set.
func VcsUpdate(dep *cfg.Dependency, dest, home string, cache, cacheGopath, useGopath, force, updateVendored bool) error {

	// If the dependency has already been pinned we can skip it. This is a
	// faster path so we don't need to resolve it again.
	if dep.Pin != "" {
		msg.Debug("Dependency %s has already been pinned. Fetching updates skipped.", dep.Name)
		return nil
	}

	msg.Info("Fetching updates for %s.\n", dep.Name)

	// Skip dependencies filtered out for the current OS/architecture.
	if filterArchOs(dep) {
		msg.Info("%s is not used for %s/%s.\n", dep.Name, runtime.GOOS, runtime.GOARCH)
		return nil
	}

	// If destination doesn't exist we need to perform an initial checkout.
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
			msg.Warn("Unable to checkout %s\n", dep.Name)
			return err
		}
	} else {
		// At this point we have a directory for the package.

		// When the directory is not empty and has no VCS directory it's
		// a vendored files situation.
		empty, err := gpath.IsDirectoryEmpty(dest)
		if err != nil {
			return err
		}
		_, err = v.DetectVcsFromFS(dest)
		if updateVendored == false && empty == false && err == v.ErrCannotDetectVCS {
			// Vendored and updates not opted in: warn and leave it alone.
			msg.Warn("%s appears to be a vendored package. Unable to update. Consider the '--update-vendored' flag.\n", dep.Name)
		} else {

			if updateVendored == true && empty == false && err == v.ErrCannotDetectVCS {
				// A vendored package, no repo, and updating the vendored packages
				// has been opted into.
				msg.Info("%s is a vendored package. Updating.", dep.Name)
				err = os.RemoveAll(dest)
				if err != nil {
					msg.Error("Unable to update vendored dependency %s.\n", dep.Name)
					return err
				} else {
					// Record that this package was refreshed as vendored so
					// a later cleanup pass can strip its VCS directory.
					dep.UpdateAsVendored = true
				}

				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
					msg.Warn("Unable to checkout %s\n", dep.Name)
					return err
				}

				return nil
			}

			repo, err := dep.GetRepo(dest)

			// Tried to checkout a repo to a path that does not work. Either the
			// type or endpoint has changed. Force is being passed in so the old
			// location can be removed and replaced with the new one.
			// Warning, any changes in the old location will be deleted.
			// TODO: Put dirty checking in on the existing local checkout.
			if (err == v.ErrWrongVCS || err == v.ErrWrongRemote) && force == true {
				var newRemote string
				if len(dep.Repository) > 0 {
					newRemote = dep.Repository
				} else {
					newRemote = "https://" + dep.Name
				}

				msg.Warn("Replacing %s with contents from %s\n", dep.Name, newRemote)
				rerr := os.RemoveAll(dest)
				if rerr != nil {
					return rerr
				}
				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
					msg.Warn("Unable to checkout %s\n", dep.Name)
					return err
				}
			} else if err != nil {
				return err
			} else if repo.IsDirty() {
				// Refuse to clobber local modifications.
				return fmt.Errorf("%s contains uncommited changes. Skipping update", dep.Name)
			} else {

				// Check if the current version is a tag or commit id. If it is
				// and that version is already checked out we can skip updating
				// which is faster than going out to the Internet to perform
				// an update.
				if dep.Reference != "" {
					version, err := repo.Version()
					if err != nil {
						return err
					}
					ib, err := isBranch(dep.Reference, repo)
					if err != nil {
						return err
					}

					// If the current version equals the ref and it's not a
					// branch it's a tag or commit id so we can skip
					// performing an update.
					if version == dep.Reference && !ib {
						msg.Info("%s is already set to version %s. Skipping update.", dep.Name, dep.Reference)
						return nil
					}
				}

				if err := repo.Update(); err != nil {
					msg.Warn("Download failed.\n")
					return err
				}
			}
		}
	}

	return nil
}
// VcsVersion set the VCS version for a checkout.
//
// After a successful version change, dep.Pin is updated to the repo's
// resolved revision.
func VcsVersion(dep *cfg.Dependency, vend string) error {
	// If the dependency has already been pinned we can skip it. This is a
	// faster path so we don't need to resolve it again.
	if dep.Pin != "" {
		msg.Debug("Dependency %s has already been pinned. Setting version skipped.", dep.Name)
		return nil
	}

	cwd := filepath.Join(vend, dep.Name)

	// If there is no reference configured there is nothing to set.
	if dep.Reference == "" {
		// Before exiting update the pinned version
		repo, err := dep.GetRepo(cwd)
		if err != nil {
			return err
		}
		dep.Pin, err = repo.Version()
		if err != nil {
			return err
		}
		return nil
	}

	// When the directory is not empty and has no VCS directory it's
	// a vendored files situation.
	empty, err := gpath.IsDirectoryEmpty(cwd)
	if err != nil {
		return err
	}
	_, err = v.DetectVcsFromFS(cwd)
	if empty == false && err == v.ErrCannotDetectVCS {
		msg.Warn("%s appears to be a vendored package. Unable to set new version. Consider the '--update-vendored' flag.\n", dep.Name)
	} else {
		repo, err := dep.GetRepo(cwd)
		if err != nil {
			return err
		}

		ver := dep.Reference
		// References in Git can begin with a ^ which is similar to semver.
		// If there is a ^ prefix we assume it's a semver constraint rather than
		// part of the git/VCS commit id.
		if repo.IsReference(ver) && !strings.HasPrefix(ver, "^") {
			msg.Info("Setting version for %s to %s.\n", dep.Name, ver)
		} else {

			// Create the constraint first to make sure it's valid before
			// working on the repo.
			constraint, err := semver.NewConstraint(ver)

			// Make sure the constraint is valid. At this point it's not a valid
			// reference so if it's not a valid constraint we can exit early.
			if err != nil {
				msg.Warn("The reference '%s' is not valid\n", ver)
				return err
			}

			// Get the tags and branches (in that order)
			refs, err := getAllVcsRefs(repo)
			if err != nil {
				return err
			}

			// Convert and filter the list to semver.Version instances
			semvers := getSemVers(refs)

			// Sort semver list so the highest matching version is found first.
			sort.Sort(sort.Reverse(semver.Collection(semvers)))
			found := false
			for _, v := range semvers {
				if constraint.Check(v) {
					found = true
					// If the constraint passes get the original reference
					ver = v.Original()
					break
				}
			}
			if found {
				msg.Info("Detected semantic version. Setting version for %s to %s.\n", dep.Name, ver)
			} else {
				msg.Warn("Unable to find semantic version for constraint %s %s\n", dep.Name, ver)
			}
		}
		if err := repo.UpdateVersion(ver); err != nil {
			msg.Error("Failed to set version to %s: %s\n", dep.Reference, err)
			return err
		}
		dep.Pin, err = repo.Version()
		if err != nil {
			return err
		}
	}

	return nil
}
// resolveImports takes a list of existing packages and resolves their imports. // // It returns a list of all of the packages that it can determine are required // for the given code to function. // // The expectation is that each item in the queue is an absolute path to a // vendored package. This attempts to read that package, and then find // its referenced packages. Those packages are then added to the list // to be scanned next. // // The resolver's handler is used in the cases where a package cannot be // located. func (r *Resolver) resolveImports(queue *list.List) ([]string, error) { for e := queue.Front(); e != nil; e = e.Next() { vdep := e.Value.(string) dep := r.stripv(vdep) // Check if marked in the Q and then explicitly mark it. We want to know // if it had previously been marked and ensure it for the future. _, foundQ := r.alreadyQ[dep] r.alreadyQ[dep] = true // If we've already encountered an error processing this dependency // skip it. _, foundErr := r.hadError[dep] if foundErr { continue } // Skip ignored packages if r.Config.HasIgnore(dep) { msg.Info("Ignoring: %s", dep) continue } r.VersionHandler.Process(dep) // Here, we want to import the package and see what imports it has. msg.Debug("Trying to open %s", vdep) pkg, err := r.BuildContext.ImportDir(vdep, 0) if err != nil { msg.Debug("ImportDir error on %s: %s", vdep, err) if strings.HasPrefix(err.Error(), "no buildable Go source") { msg.Debug("No subpackages declared. Skipping %s.", dep) continue } else if os.IsNotExist(err) && !foundErr && !foundQ { // If the location doesn't exist, there hasn't already been an // error, it's not already been in the Q then try to fetch it. // When there's an error or it's already in the Q (it should be // fetched if it's marked in r.alreadyQ) we skip to make sure // not to get stuck in a recursion. // If the location doesn't exist try to fetch it. 
if ok, err2 := r.Handler.NotFound(dep); ok { r.alreadyQ[dep] = true // By adding to the queue it will get reprocessed now that // it exists. queue.PushBack(r.vpath(dep)) r.VersionHandler.SetVersion(dep) } else if err2 != nil { r.hadError[dep] = true msg.Error("Error looking for %s: %s", dep, err2) } else { r.hadError[dep] = true // TODO (mpb): Should we toss this into a Handler to // see if this is on GOPATH and copy it? msg.Info("Not found in vendor/: %s (1)", dep) } } else { r.hadError[dep] = true msg.Error("Error scanning %s: %s", dep, err) } continue } // Range over all of the identified imports and see which ones we // can locate. for _, imp := range pkg.Imports { pi := r.FindPkg(imp) if pi.Loc != LocCgo && pi.Loc != LocGoroot { msg.Debug("Package %s imports %s", dep, imp) } switch pi.Loc { case LocVendor: msg.Debug("In vendor: %s", imp) if _, ok := r.alreadyQ[imp]; !ok { msg.Debug("Marking %s to be scanned.", imp) r.alreadyQ[imp] = true queue.PushBack(r.vpath(imp)) if err := r.Handler.InVendor(imp); err == nil { r.VersionHandler.SetVersion(imp) } else { msg.Warn("Error updating %s: %s", imp, err) } r.VersionHandler.SetVersion(imp) } case LocUnknown: msg.Debug("Missing %s. Trying to resolve.", imp) if ok, err := r.Handler.NotFound(imp); ok { r.alreadyQ[imp] = true queue.PushBack(r.vpath(imp)) r.VersionHandler.SetVersion(imp) } else if err != nil { r.hadError[dep] = true msg.Warn("Error looking for %s: %s", imp, err) } else { r.hadError[dep] = true msg.Info("Not found: %s (2)", imp) } case LocGopath: msg.Debug("Found on GOPATH, not vendor: %s", imp) if _, ok := r.alreadyQ[imp]; !ok { // Only scan it if it gets moved into vendor/ if ok, _ := r.Handler.OnGopath(imp); ok { r.alreadyQ[imp] = true queue.PushBack(r.vpath(imp)) r.VersionHandler.SetVersion(imp) } } } } } // FIXME: From here to the end is a straight copy of the resolveList() func. 
res := make([]string, 0, queue.Len()) // In addition to generating a list for e := queue.Front(); e != nil; e = e.Next() { t := r.stripv(e.Value.(string)) root, sp := util.NormalizeName(t) // TODO(mattfarina): Need to eventually support devImport existing := r.Config.Imports.Get(root) if existing != nil { if sp != "" && !existing.HasSubpackage(sp) { existing.Subpackages = append(existing.Subpackages, sp) } } else { newDep := &cfg.Dependency{ Name: root, } if sp != "" { newDep.Subpackages = []string{sp} } r.Config.Imports = append(r.Config.Imports, newDep) } res = append(res, t) } return res, nil }
// resolveList takes a list and resolves it. // // This walks the entire file tree for the given dependencies, not just the // parts that are imported directly. Using this will discover dependencies // regardless of OS, and arch. func (r *Resolver) resolveList(queue *list.List) ([]string, error) { var failedDep string for e := queue.Front(); e != nil; e = e.Next() { dep := e.Value.(string) t := strings.TrimPrefix(dep, r.VendorDir+string(os.PathSeparator)) if r.Config.HasIgnore(t) { msg.Info("Ignoring: %s", t) continue } r.VersionHandler.Process(t) //msg.Warn("#### %s ####", dep) //msg.Info("Seen Count: %d", len(r.seen)) // Catch the outtermost dependency. failedDep = dep err := filepath.Walk(dep, func(path string, fi os.FileInfo, err error) error { if err != nil && err != filepath.SkipDir { return err } // Skip files. if !fi.IsDir() { return nil } // Skip dirs that are not source. if !srcDir(fi) { //msg.Debug("Skip resource %s", fi.Name()) return filepath.SkipDir } // Anything that comes through here has already been through // the queue. 
r.alreadyQ[path] = true e := r.queueUnseen(path, queue) if err != nil { failedDep = path //msg.Error("Failed to fetch dependency %s: %s", path, err) } return e }) if err != nil && err != filepath.SkipDir { msg.Error("Dependency %s failed to resolve: %s.", failedDep, err) return []string{}, err } } res := make([]string, 0, queue.Len()) // In addition to generating a list for e := queue.Front(); e != nil; e = e.Next() { t := strings.TrimPrefix(e.Value.(string), r.VendorDir+string(os.PathSeparator)) root, sp := util.NormalizeName(t) // TODO(mattfarina): Need to eventually support devImport existing := r.Config.Imports.Get(root) if existing != nil { if sp != "" && !existing.HasSubpackage(sp) { existing.Subpackages = append(existing.Subpackages, sp) } } else { newDep := &cfg.Dependency{ Name: root, } if sp != "" { newDep.Subpackages = []string{sp} } r.Config.Imports = append(r.Config.Imports, newDep) } res = append(res, e.Value.(string)) } return res, nil }