// SetReference is a command to set the VCS reference (commit id, tag, etc) for
// a project.
func SetReference(conf *cfg.Config, resolveTest bool) error {
	cwd, err := gpath.Vendor()
	if err != nil {
		return err
	}

	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
		msg.Info("No references set.\n")
		return nil
	}

	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					if err := VcsVersion(dep, cwd); err != nil {
						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	if resolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	// close(done)
	// close(in)

	return nil
}
func writeLock(conf, confcopy *cfg.Config, base string) {
	hash, err := conf.Hash()
	if err != nil {
		msg.Die("Failed to generate config hash. Unable to generate lock file.")
	}
	lock := cfg.NewLockfile(confcopy.Imports, hash)
	if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
		msg.Die("Failed to write glide lock file: %s", err)
	}
}
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsUpdate(dep, i.Force, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
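SetReference, ConcurrentUpdate, and Export below all share the same worker-pool shape. As a reference, here is a minimal sketch of that pattern in isolation. It assumes only the concurrentWorkers constant and the msg logger used throughout this code; processOne and processAll are hypothetical names, and the error aggregation keeps just the first error rather than building a multi-error.

// processOne is a hypothetical stand-in for the real per-item work
// (VcsUpdate, repo.ExportDir, ...).
func processOne(item string) error {
	msg.Info("processing %s", item)
	return nil
}

// processAll shows the pooling skeleton: a buffered "in" channel feeds a fixed
// number of workers, a "done" channel stops them once wg.Wait() returns, and a
// mutex guards the shared error value.
func processAll(items []string) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan string, concurrentWorkers)
	var wg sync.WaitGroup
	var mu sync.Mutex
	var firstErr error

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan string) {
			for {
				select {
				case item := <-ch:
					if err := processOne(item); err != nil {
						// Record the error without racing other workers.
						mu.Lock()
						if firstErr == nil {
							firstErr = err
						}
						mu.Unlock()
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, item := range items {
		wg.Add(1)
		in <- item
	}
	wg.Wait()

	// Stop the workers, mirroring the "done <- struct{}{}" loops used above.
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	return firstErr
}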
func appendImports(deps []*cfg.Dependency, config *cfg.Config) {
	if len(deps) == 0 {
		msg.Info("No dependencies added.")
		return
	}

	// Append deps to existing dependencies.
	if err := config.AddImport(deps...); err != nil {
		msg.Die("Failed to add imports: %s", err)
	}
}
// writeConfigToFileOrStdout is a convenience function for import utils.
func writeConfigToFileOrStdout(config *cfg.Config, dest string) {
	if dest != "" {
		if err := config.WriteFile(dest); err != nil {
			msg.Die("Failed to write %s: %s", gpath.GlideFile, err)
		}
	} else {
		o, err := config.Marshal()
		if err != nil {
			msg.Die("Error encoding config: %s", err)
		}
		msg.Default.Stdout.Write(o)
	}
}
func guessImportDeps(base string, config *cfg.Config) {
	msg.Info("Attempting to import from other package managers (use --skip-import to skip)")
	deps := []*cfg.Dependency{}
	absBase, err := filepath.Abs(base)
	if err != nil {
		msg.Die("Failed to resolve location of %s: %s", base, err)
	}

	if d, ok := guessImportGodep(absBase); ok {
		msg.Info("Importing Godep configuration")
		msg.Warn("Godep uses commit id versions. Consider using Semantic Versions with Glide")
		deps = d
	} else if d, ok := guessImportGPM(absBase); ok {
		msg.Info("Importing GPM configuration")
		deps = d
	} else if d, ok := guessImportGB(absBase); ok {
		msg.Info("Importing GB configuration")
		deps = d
	}

	for _, i := range deps {
		if i.Reference == "" {
			msg.Info("--> Found imported reference to %s", i.Name)
		} else {
			msg.Info("--> Found imported reference to %s at revision %s", i.Name, i.Reference)
		}

		config.Imports = append(config.Imports, i)
	}
}
func exportFlattenedDeps(conf *cfg.Config, in map[string]*cfg.Dependency) {
	out := make([]*cfg.Dependency, len(in))
	i := 0
	for _, v := range in {
		out[i] = v
		i++
	}
	conf.Imports = out
}
// LoadLockfile loads the contents of a glide.lock file.
//
// TODO: This should go in another package.
func LoadLockfile(base string, conf *cfg.Config) (*cfg.Lockfile, error) {
	yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
	if err != nil {
		return nil, err
	}
	lock, err := cfg.LockfileFromYaml(yml)
	if err != nil {
		return nil, err
	}

	hash, err := conf.Hash()
	if err != nil {
		return nil, err
	}

	if hash != lock.Hash {
		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
	}

	return lock, nil
}
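A minimal usage sketch for LoadLockfile, assuming a *cfg.Config has already been loaded elsewhere. checkLock is a hypothetical caller, not part of glide.

func checkLock(base string, conf *cfg.Config) {
	lock, err := LoadLockfile(base, conf)
	if err != nil {
		msg.Die("Could not load lockfile: %s", err)
	}
	// LoadLockfile already compares lock.Hash against conf.Hash() and only
	// warns on a mismatch, so callers that need strict checking can compare
	// again here and fail hard instead.
	msg.Info("Loaded lockfile with hash %s", lock.Hash)
}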
func TestAddPkgsToConfig(t *testing.T) {
	// Route output to discard so it's not displayed with the test output.
	o := msg.Default.Stderr
	msg.Default.Stderr = ioutil.Discard

	conf := new(cfg.Config)
	dep := new(cfg.Dependency)
	dep.Name = "github.com/Masterminds/cookoo"
	dep.Subpackages = append(dep.Subpackages, "convert")
	conf.Imports = append(conf.Imports, dep)

	names := []string{
		"github.com/Masterminds/cookoo/fmt",
		"github.com/Masterminds/semver",
	}

	addPkgsToConfig(conf, names, false, true)

	if !conf.HasDependency("github.com/Masterminds/semver") {
		t.Error("addPkgsToConfig failed to add github.com/Masterminds/semver")
	}

	d := conf.Imports.Get("github.com/Masterminds/cookoo")
	found := false
	for _, s := range d.Subpackages {
		if s == "fmt" {
			found = true
		}
	}
	if !found {
		t.Error("addPkgsToConfig failed to add subpackage to existing import")
	}

	// Restore messaging to original location
	msg.Default.Stderr = o
}
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)
	if err != nil {
		return err
	}

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())
	if err != nil {
		// When there are different physical devices we cannot rename cross device.
		// Instead we copy.
		switch terr := err.(type) {
		case *os.LinkError:
			// syscall.EXDEV is the common name for the cross device link error
			// which has varying output text across different operating systems.
			if terr.Err == syscall.EXDEV {
				msg.Debug("Cross link err, trying manual copy: %s", err)
				return gpath.CopyDir(vp, i.VendorPath())
			} else if runtime.GOOS == "windows" {
				// In windows it can drop down to an operating system call that
				// returns an operating system error with a different number and
				// message. Checking for that as a fall back.
				noerr, ok := terr.Err.(syscall.Errno)
				// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
				// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
				if ok && noerr == 0x11 {
					msg.Debug("Cross link err on Windows, trying manual copy: %s", err)
					return gpath.CopyDir(vp, i.VendorPath())
				}
			}
		}
	}

	return err
}
// GuessDeps tries to get the dependencies for the current directory.
//
// Params
//	- dirname (string): Directory to use as the base. Default: "."
//	- skipImport (bool): Whether to skip importing from Godep, GPM, and gb
func GuessDeps(c cookoo.Context, p *cookoo.Params) (interface{}, cookoo.Interrupt) {
	buildContext, err := util.GetBuildContext()
	if err != nil {
		return nil, err
	}
	base := p.Get("dirname", ".").(string)
	skipImport := p.Get("skipImport", false).(bool)
	name := guessPackageName(buildContext, base)

	Info("Generating a YAML configuration file and guessing the dependencies")

	config := new(cfg.Config)

	// Get the name of the top level package
	config.Name = name

	// Import by looking at other package managers and looking over the
	// entire directory structure.

	// Attempt to import from other package managers.
	if !skipImport {
		Info("Attempting to import from other package managers (use --skip-import to skip)")
		deps := []*cfg.Dependency{}
		absBase, err := filepath.Abs(base)
		if err != nil {
			return nil, err
		}

		if d, ok := guessImportGodep(absBase); ok {
			Info("Importing Godep configuration")
			Warn("Godep uses commit id versions. Consider using Semantic Versions with Glide")
			deps = d
		} else if d, ok := guessImportGPM(absBase); ok {
			Info("Importing GPM configuration")
			deps = d
		} else if d, ok := guessImportGB(absBase); ok {
			Info("Importing GB configuration")
			deps = d
		}

		for _, i := range deps {
			Info("Found imported reference to %s\n", i.Name)
			config.Imports = append(config.Imports, i)
		}
	}

	// Resolve dependencies by looking at the tree.
	r, err := dependency.NewResolver(base)
	if err != nil {
		return nil, err
	}

	h := &dependency.DefaultMissingPackageHandler{Missing: []string{}, Gopath: []string{}}
	r.Handler = h

	sortable, err := r.ResolveLocal(false)
	if err != nil {
		return nil, err
	}

	sort.Strings(sortable)

	vpath := r.VendorDir
	if !strings.HasSuffix(vpath, "/") {
		vpath = vpath + string(os.PathSeparator)
	}

	for _, pa := range sortable {
		n := strings.TrimPrefix(pa, vpath)
		root := util.GetRootFromPackage(n)

		if !config.HasDependency(root) {
			Info("Found reference to %s\n", n)
			d := &cfg.Dependency{
				Name: root,
			}
			subpkg := strings.TrimPrefix(n, root)
			if len(subpkg) > 0 && subpkg != "/" {
				d.Subpackages = []string{subpkg}
			}
			config.Imports = append(config.Imports, d)
		} else {
			subpkg := strings.TrimPrefix(n, root)
			if len(subpkg) > 0 && subpkg != "/" {
				subpkg = strings.TrimPrefix(subpkg, "/")
				d := config.Imports.Get(root)
				f := false
				for _, v := range d.Subpackages {
					if v == subpkg {
						f = true
					}
				}
				if !f {
					Info("Adding sub-package %s to %s\n", subpkg, root)
					d.Subpackages = append(d.Subpackages, subpkg)
				}
			}
		}
	}

	return config, nil
}
// guessDeps attempts to resolve all of the dependencies for a given project.
//
// base is the directory to start with.
// skipImport will skip running the automatic imports.
//
// FIXME: This function is likely a one-off that has a more standard alternative.
// It's also long and could use a refactor.
func guessDeps(base string, skipImport bool) *cfg.Config {
	buildContext, err := util.GetBuildContext()
	if err != nil {
		msg.Die("Failed to build an import context: %s", err)
	}
	name := buildContext.PackageName(base)

	msg.Info("Generating a YAML configuration file and guessing the dependencies")

	config := new(cfg.Config)

	// Get the name of the top level package
	config.Name = name

	// Import by looking at other package managers and looking over the
	// entire directory structure.

	// Attempt to import from other package managers.
	if !skipImport {
		guessImportDeps(base, config)
	}

	importLen := len(config.Imports)
	if importLen == 0 {
		msg.Info("Scanning code to look for dependencies")
	} else {
		msg.Info("Scanning code to look for dependencies not found in import")
	}

	// Resolve dependencies by looking at the tree.
	r, err := dependency.NewResolver(base)
	if err != nil {
		msg.Die("Error creating a dependency resolver: %s", err)
	}

	h := &dependency.DefaultMissingPackageHandler{Missing: []string{}, Gopath: []string{}}
	r.Handler = h

	sortable, err := r.ResolveLocal(false)
	if err != nil {
		msg.Die("Error resolving local dependencies: %s", err)
	}

	sort.Strings(sortable)

	vpath := r.VendorDir
	if !strings.HasSuffix(vpath, "/") {
		vpath = vpath + string(os.PathSeparator)
	}

	for _, pa := range sortable {
		n := strings.TrimPrefix(pa, vpath)
		root, subpkg := util.NormalizeName(n)

		if !config.HasDependency(root) && root != config.Name {
			msg.Info("--> Found reference to %s\n", n)
			d := &cfg.Dependency{
				Name: root,
			}
			if len(subpkg) > 0 {
				d.Subpackages = []string{subpkg}
			}
			config.Imports = append(config.Imports, d)
		} else if config.HasDependency(root) {
			if len(subpkg) > 0 {
				subpkg = strings.TrimPrefix(subpkg, "/")
				d := config.Imports.Get(root)
				if !d.HasSubpackage(subpkg) {
					msg.Info("--> Adding sub-package %s to %s\n", subpkg, root)
					d.Subpackages = append(d.Subpackages, subpkg)
				}
			}
		}
	}

	if len(config.Imports) == importLen && importLen != 0 {
		msg.Info("--> Code scanning found no additional imports")
	}

	return config
}
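A hedged sketch of how guessDeps and writeConfigToFileOrStdout above could be wired together in an init-style flow. initConfig and the hard-coded "." base are illustrative, not glide's actual command plumbing.

func initConfig(skipImport bool) {
	base := "."
	// Guess dependencies from other package managers and the source tree.
	conf := guessDeps(base, skipImport)
	// Write glide.yaml to disk (passing "" would print it to stdout instead).
	writeConfigToFileOrStdout(conf, filepath.Join(base, gpath.GlideFile))
}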
// SetReference is a command to set the VCS reference (commit id, tag, etc) for
// a project.
func SetReference(conf *cfg.Config, resolveTest bool) error {

	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
		msg.Info("No references set.\n")
		return nil
	}

	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					var loc string
					if dep.Repository != "" {
						loc = dep.Repository
					} else {
						loc = "https://" + dep.Name
					}
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsVersion(dep); err != nil {
						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)

						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	if resolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	// close(done)
	// close(in)

	return returnErr
}
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	msg.Info("Downloading dependencies. Please wait...")

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					var loc string
					if dep.Repository != "" {
						loc = dep.Repository
					} else {
						loc = "https://" + dep.Name
					}
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					dest := filepath.Join(i.VendorPath(), dep.Name)
					if err := VcsUpdate(dep, dest, i.Home, i.UseCache, i.UseCacheGopath, i.UseGopath, i.Force, i.UpdateVendored, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
// addPkgsToConfig adds the given packages to the config file.
//
// Along the way it:
// - ensures that this package is not in the ignore list
// - checks to see if this is already in the dependency list.
// - splits the version off of the package name and adds the version attribute
// - separates repo from packages
// - sets up insecure repo URLs where necessary
// - generates a list of subpackages
func addPkgsToConfig(conf *cfg.Config, names []string, insecure, nonInteract bool) (int, error) {

	if len(names) == 1 {
		msg.Info("Preparing to install %d package.", len(names))
	} else {
		msg.Info("Preparing to install %d packages.", len(names))
	}
	numAdded := 0
	for _, name := range names {
		var version string
		parts := strings.Split(name, "#")
		if len(parts) > 1 {
			name = parts[0]
			version = parts[1]
		}

		msg.Info("Attempting to get package %s", name)

		root, subpkg := util.NormalizeName(name)
		if len(root) == 0 {
			return 0, fmt.Errorf("Package name is required for %q.", name)
		}

		if conf.HasDependency(root) {

			// Check if the subpackage is present.
			if subpkg != "" {
				dep := conf.Imports.Get(root)
				if dep.HasSubpackage(subpkg) {
					msg.Warn("--> Package %q is already in glide.yaml. Skipping", name)
				} else {
					dep.Subpackages = append(dep.Subpackages, subpkg)
					msg.Info("--> Adding sub-package %s to existing import %s", subpkg, root)
					numAdded++
				}
			} else {
				msg.Warn("--> Package %q is already in glide.yaml. Skipping", root)
			}
			continue
		}

		if conf.HasIgnore(root) {
			msg.Warn("--> Package %q is set to be ignored in glide.yaml. Skipping", root)
			continue
		}

		dep := &cfg.Dependency{
			Name: root,
		}

		// When retrieving from an insecure location set the repo to the
		// insecure location.
		if insecure {
			dep.Repository = "http://" + root
		}

		if version != "" {
			dep.Reference = version
		} else if !nonInteract {
			getWizard(dep)
		}

		if len(subpkg) > 0 {
			dep.Subpackages = []string{subpkg}
		}

		if dep.Reference != "" {
			msg.Info("--> Adding %s to your configuration with the version %s", dep.Name, dep.Reference)
		} else {
			msg.Info("--> Adding %s to your configuration", dep.Name)
		}

		conf.Imports = append(conf.Imports, dep)
		numAdded++
	}
	return numAdded, nil
}
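A hedged usage sketch for the four-argument addPkgsToConfig above. addExamplePackages is a hypothetical caller, and the package names and "#" version pin are arbitrary examples.

func addExamplePackages(conf *cfg.Config) {
	// Add two packages non-interactively (nonInteract=true skips the wizard),
	// one pinned via the "#" version suffix, without rewriting repo URLs to
	// insecure "http://" locations (insecure=false).
	numAdded, err := addPkgsToConfig(conf, []string{
		"github.com/Masterminds/semver#v1.2.2",
		"github.com/Masterminds/cookoo/fmt",
	}, false, true)
	if err != nil {
		msg.Die("Failed to add packages: %s", err)
	}
	msg.Info("Added %d packages", numAdded)
}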
// guessDeps attempts to resolve all of the dependencies for a given project.
//
// base is the directory to start with.
// skipImport will skip running the automatic imports.
//
// FIXME: This function is likely a one-off that has a more standard alternative.
// It's also long and could use a refactor.
func guessDeps(base string, skipImport bool) *cfg.Config {
	buildContext, err := util.GetBuildContext()
	if err != nil {
		msg.Die("Failed to build an import context: %s", err)
	}
	name := buildContext.PackageName(base)

	msg.Info("Generating a YAML configuration file and guessing the dependencies")

	config := new(cfg.Config)

	// Get the name of the top level package
	config.Name = name

	// Import by looking at other package managers and looking over the
	// entire directory structure.

	// Attempt to import from other package managers.
	if !skipImport {
		msg.Info("Attempting to import from other package managers (use --skip-import to skip)")
		deps := []*cfg.Dependency{}
		absBase, err := filepath.Abs(base)
		if err != nil {
			msg.Die("Failed to resolve location of %s: %s", base, err)
		}

		if d, ok := guessImportGodep(absBase); ok {
			msg.Info("Importing Godep configuration")
			msg.Warn("Godep uses commit id versions. Consider using Semantic Versions with Glide")
			deps = d
		} else if d, ok := guessImportGPM(absBase); ok {
			msg.Info("Importing GPM configuration")
			deps = d
		} else if d, ok := guessImportGB(absBase); ok {
			msg.Info("Importing GB configuration")
			deps = d
		}

		for _, i := range deps {
			msg.Info("Found imported reference to %s\n", i.Name)
			config.Imports = append(config.Imports, i)
		}
	}

	// Resolve dependencies by looking at the tree.
	r, err := dependency.NewResolver(base)
	if err != nil {
		msg.Die("Error creating a dependency resolver: %s", err)
	}

	h := &dependency.DefaultMissingPackageHandler{Missing: []string{}, Gopath: []string{}}
	r.Handler = h

	sortable, err := r.ResolveLocal(false)
	if err != nil {
		msg.Die("Error resolving local dependencies: %s", err)
	}

	sort.Strings(sortable)

	vpath := r.VendorDir
	if !strings.HasSuffix(vpath, "/") {
		vpath = vpath + string(os.PathSeparator)
	}

	for _, pa := range sortable {
		n := strings.TrimPrefix(pa, vpath)
		root, subpkg := util.NormalizeName(n)

		if !config.HasDependency(root) {
			msg.Info("Found reference to %s\n", n)
			d := &cfg.Dependency{
				Name: root,
			}
			if len(subpkg) > 0 {
				d.Subpackages = []string{subpkg}
			}
			config.Imports = append(config.Imports, d)
		} else {
			if len(subpkg) > 0 {
				subpkg = strings.TrimPrefix(subpkg, "/")
				d := config.Imports.Get(root)
				if !d.HasSubpackage(subpkg) {
					msg.Info("Adding sub-package %s to %s\n", subpkg, root)
					d.Subpackages = append(d.Subpackages, subpkg)
				}
			}
		}
	}

	return config
}
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)
	if err != nil {
		return err
	}

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())

	// When there are different physical devices we cannot rename cross device.
	// Fall back to manual copy.
	if err != nil && strings.Contains(err.Error(), "cross-device link") {
		msg.Debug("Cross link err, trying manual copy: %s", err)
		err = gpath.CopyDir(vp, i.VendorPath())
	}

	return err
}
// Update updates all dependencies.
//
// It begins with the dependencies in the config file, but also resolves
// transitive dependencies. The returned lockfile has all of the dependencies
// listed, but the version reconciliation has not been done.
//
// In other words, all versions in the Lockfile will be empty.
func (i *Installer) Update(conf *cfg.Config) error {
	base := "."

	ic := newImportCache()

	m := &MissingPackageHandler{
		home:    i.Home,
		force:   i.Force,
		Config:  conf,
		Use:     ic,
		updated: i.Updated,
	}

	v := &VersionHandler{
		Use:       ic,
		Imported:  make(map[string]bool),
		Conflicts: make(map[string]bool),
		Config:    conf,
	}

	// Update imports
	res, err := dependency.NewResolver(base)
	if err != nil {
		msg.Die("Failed to create a resolver: %s", err)
	}
	res.Config = conf
	res.Handler = m
	res.VersionHandler = v
	res.ResolveAllFiles = i.ResolveAllFiles
	res.ResolveTest = i.ResolveTest
	msg.Info("Resolving imports")

	imps, timps, err := res.ResolveLocal(false)
	if err != nil {
		msg.Die("Failed to resolve local packages: %s", err)
	}

	var deps cfg.Dependencies
	var tdeps cfg.Dependencies
	for _, v := range imps {
		n := res.Stripv(v)
		if conf.HasIgnore(n) {
			continue
		}
		rt, sub := util.NormalizeName(n)
		if sub == "" {
			sub = "."
		}
		d := deps.Get(rt)
		if d == nil {
			nd := &cfg.Dependency{
				Name:        rt,
				Subpackages: []string{sub},
			}
			deps = append(deps, nd)
		} else if !d.HasSubpackage(sub) {
			d.Subpackages = append(d.Subpackages, sub)
		}
	}
	if i.ResolveTest {
		for _, v := range timps {
			n := res.Stripv(v)
			if conf.HasIgnore(n) {
				continue
			}
			rt, sub := util.NormalizeName(n)
			if sub == "" {
				sub = "."
			}
			d := deps.Get(rt)
			if d == nil {
				d = tdeps.Get(rt)
			}
			if d == nil {
				nd := &cfg.Dependency{
					Name:        rt,
					Subpackages: []string{sub},
				}
				tdeps = append(tdeps, nd)
			} else if !d.HasSubpackage(sub) {
				d.Subpackages = append(d.Subpackages, sub)
			}
		}
	}

	_, err = allPackages(deps, res, false)
	if err != nil {
		msg.Die("Failed to retrieve a list of dependencies: %s", err)
	}

	if i.ResolveTest {
		msg.Debug("Resolving test dependencies")
		_, err = allPackages(tdeps, res, true)
		if err != nil {
			msg.Die("Failed to retrieve a list of test dependencies: %s", err)
		}
	}

	msg.Info("Downloading dependencies. Please wait...")

	err = ConcurrentUpdate(conf.Imports, i, conf)
	if err != nil {
		return err
	}

	if i.ResolveTest {
		err = ConcurrentUpdate(conf.DevImports, i, conf)
		if err != nil {
			return err
		}
	}

	return nil
}
// addPkgsToConfig adds the given packages to the config file.
//
// Along the way it:
// - ensures that this package is not in the ignore list
// - checks to see if this is already in the dependency list.
// - splits the version off of the package name and adds the version attribute
// - separates repo from packages
// - sets up insecure repo URLs where necessary
// - generates a list of subpackages
func addPkgsToConfig(conf *cfg.Config, names []string, insecure bool) error {

	msg.Info("Preparing to install %d package.", len(names))

	for _, name := range names {
		var version string
		parts := strings.Split(name, "#")
		if len(parts) > 1 {
			name = parts[0]
			version = parts[1]
		}

		root := util.GetRootFromPackage(name)
		if len(root) == 0 {
			return fmt.Errorf("Package name is required for %q.", name)
		}

		if conf.HasDependency(root) {

			// Check if the subpackage is present.
			subpkg := strings.TrimPrefix(name, root)
			subpkg = strings.TrimPrefix(subpkg, "/")
			if subpkg != "" {
				found := false
				dep := conf.Imports.Get(root)
				for _, s := range dep.Subpackages {
					if s == subpkg {
						found = true
						break
					}
				}
				if found {
					msg.Warn("Package %q is already in glide.yaml. Skipping", name)
				} else {
					dep.Subpackages = append(dep.Subpackages, subpkg)
					msg.Info("Adding sub-package %s to existing import %s", subpkg, root)
				}
			} else {
				msg.Warn("Package %q is already in glide.yaml. Skipping", root)
			}
			continue
		}

		if conf.HasIgnore(root) {
			msg.Warn("Package %q is set to be ignored in glide.yaml. Skipping", root)
			continue
		}

		dep := &cfg.Dependency{
			Name: root,
		}

		if version != "" {
			dep.Reference = version
		}

		// When retrieving from an insecure location set the repo to the
		// insecure location.
		if insecure {
			dep.Repository = "http://" + root
		}

		subpkg := strings.TrimPrefix(name, root)
		if len(subpkg) > 0 && subpkg != "/" {
			dep.Subpackages = []string{strings.TrimPrefix(subpkg, "/")}
		}

		if dep.Reference != "" {
			msg.Info("Importing %s with the version %s", dep.Name, dep.Reference)
		} else {
			msg.Info("Importing %s", dep.Name)
		}

		conf.Imports = append(conf.Imports, dep)
	}

	return nil
}