// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsUpdate(dep, i.Force, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
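// The functions in this file share one worker-pool shape: a buffered work
// channel, a fixed number of goroutines, a WaitGroup to know when the queue
// has drained, a mutex-guarded error accumulator, and a buffered done channel
// used to stop the workers afterwards. Below is a minimal, self-contained
// sketch of that shape (imports: fmt, sync). It is illustrative only:
// concurrentRun, the string item type, and the error joining are stand-ins,
// not glide APIs.
func concurrentRun(items []string, workers int, process func(string) error) error {
	done := make(chan struct{}, workers)
	in := make(chan string, workers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var errs []error

	for w := 0; w < workers; w++ {
		go func(ch <-chan string) {
			for {
				select {
				case item := <-ch:
					if err := process(item); err != nil {
						// Guard the shared slice; workers run concurrently.
						lock.Lock()
						errs = append(errs, err)
						lock.Unlock()
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, item := range items {
		wg.Add(1)
		in <- item
	}
	wg.Wait()

	// Tell each worker to exit. The done channel is buffered to the worker
	// count, so these sends never block even if a worker already returned.
	for w := 0; w < workers; w++ {
		done <- struct{}{}
	}

	if len(errs) > 0 {
		return fmt.Errorf("%d of %d items failed: %v", len(errs), len(items), errs)
	}
	return nil
}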
func wizardFindVersions(d *cfg.Dependency) {
	l, err := cache.Location()
	if err != nil {
		msg.Debug("Problem detecting cache location: %s", err)
		return
	}

	var remote string
	if d.Repository != "" {
		remote = d.Repository
	} else {
		remote = "https://" + d.Name
	}

	key, err := cache.Key(remote)
	if err != nil {
		msg.Debug("Problem generating cache key for %s: %s", remote, err)
		return
	}

	local := filepath.Join(l, "src", key)
	repo, err := vcs.NewRepo(remote, local)
	if err != nil {
		msg.Debug("Problem getting repo instance: %s", err)
		return
	}

	var useLocal bool
	if _, err = os.Stat(local); err == nil {
		useLocal = true
	}

	// Git endpoints allow for querying without fetching the codebase locally.
	// We try that first to avoid fetching right away. Is this premature
	// optimization?
	cc := true
	if !useLocal && repo.Vcs() == vcs.Git {
		out, err2 := exec.Command("git", "ls-remote", remote).CombinedOutput()
		if err2 == nil {
			cache.MemTouch(remote)
			cc = false
			lines := strings.Split(string(out), "\n")
			for _, i := range lines {
				ti := strings.TrimSpace(i)
				if found := createGitParseVersion.FindString(ti); found != "" {
					tg := strings.TrimPrefix(strings.TrimSuffix(found, "^{}"), "tags/")
					cache.MemPut(remote, tg)
					if d.Reference != "" && strings.HasPrefix(ti, d.Reference) {
						cache.MemSetCurrent(remote, tg)
					}
				}
			}
		}
	}

	if cc {
		cache.Lock(key)
		cache.MemTouch(remote)
		if _, err = os.Stat(local); os.IsNotExist(err) {
			repo.Get()
			branch := findCurrentBranch(repo)
			c := cache.RepoInfo{DefaultBranch: branch}
			err = cache.SaveRepoData(key, c)
			if err != nil {
				msg.Debug("Error saving cache repo details: %s", err)
			}
		} else {
			repo.Update()
		}

		tgs, err := repo.Tags()
		if err != nil {
			msg.Debug("Problem getting tags: %s", err)
		} else {
			for _, v := range tgs {
				cache.MemPut(remote, v)
			}
		}

		if d.Reference != "" && repo.IsReference(d.Reference) {
			tgs, err = repo.TagsFromCommit(d.Reference)
			if err != nil {
				msg.Debug("Problem getting tags for commit: %s", err)
			} else {
				if len(tgs) > 0 {
					for _, v := range tgs {
						if !(repo.Vcs() == vcs.Hg && v == "tip") {
							cache.MemSetCurrent(remote, v)
						}
					}
				}
			}
		}
		cache.Unlock(key)
	}
}
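// wizardFindVersions avoids a full clone for Git remotes by shelling out to
// "git ls-remote" and pulling tag names out of the ref listing with the
// package-level createGitParseVersion regexp. The sketch below shows the same
// idea in isolation (imports: os/exec, regexp, strings). listRemoteTags and
// its regexp are illustrative stand-ins, not glide's actual pattern; output
// lines look like "<sha>\trefs/tags/v1.2.3", and annotated tags repeat with a
// peeled "^{}" suffix.
func listRemoteTags(remote string) ([]string, error) {
	out, err := exec.Command("git", "ls-remote", "--tags", remote).CombinedOutput()
	if err != nil {
		return nil, err
	}

	tagRef := regexp.MustCompile(`refs/tags/(.+)`)
	seen := map[string]bool{}
	var tags []string
	for _, line := range strings.Split(string(out), "\n") {
		m := tagRef.FindStringSubmatch(strings.TrimSpace(line))
		if m == nil {
			continue
		}
		// Annotated tags appear twice; drop the "^{}" from the peeled form
		// and deduplicate.
		tag := strings.TrimSuffix(m[1], "^{}")
		if !seen[tag] {
			seen[tag] = true
			tags = append(tags, tag)
		}
	}
	return tags, nil
}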
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	msg.Info("Downloading dependencies. Please wait...")

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					var loc string
					if dep.Repository != "" {
						loc = dep.Repository
					} else {
						loc = "https://" + dep.Name
					}
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					dest := filepath.Join(i.VendorPath(), dep.Name)
					if err := VcsUpdate(dep, dest, i.Home, i.UseCache, i.UseCacheGopath, i.UseGopath, i.Force, i.UpdateVendored, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())
	if err != nil {
		// When there are different physical devices we cannot rename cross device.
		// Instead we copy.
		switch terr := err.(type) {
		case *os.LinkError:
			// syscall.EXDEV is the common name for the cross device link error
			// which has varying output text across different operating systems.
			if terr.Err == syscall.EXDEV {
				msg.Debug("Cross link err, trying manual copy: %s", err)
				return gpath.CopyDir(vp, i.VendorPath())
			} else if runtime.GOOS == "windows" {
				// In windows it can drop down to an operating system call that
				// returns an operating system error with a different number and
				// message. Checking for that as a fall back.
				noerr, ok := terr.Err.(syscall.Errno)
				// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
				// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
				if ok && noerr == 0x11 {
					msg.Debug("Cross link err on Windows, trying manual copy: %s", err)
					return gpath.CopyDir(vp, i.VendorPath())
				}
			}
		}
	}

	return err
}
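// Export above relies on os.Rename failing with a cross-device error when the
// temp dir and the vendor dir live on different filesystems, then falls back
// to gpath.CopyDir. A self-contained sketch of that detection follows
// (imports: io, os, path/filepath, runtime, syscall). renameOrCopy and
// copyTree are hypothetical helpers written for illustration, not glide
// functions.
func renameOrCopy(src, dst string) error {
	err := os.Rename(src, dst)
	if err == nil {
		return nil
	}
	le, ok := err.(*os.LinkError)
	if !ok {
		return err
	}
	// EXDEV is the "invalid cross-device link" errno. On Windows the
	// equivalent is ERROR_NOT_SAME_DEVICE (0x11), surfaced as a syscall.Errno.
	if le.Err == syscall.EXDEV {
		return copyTree(src, dst)
	}
	if runtime.GOOS == "windows" {
		if no, ok := le.Err.(syscall.Errno); ok && no == 0x11 {
			return copyTree(src, dst)
		}
	}
	return err
}

// copyTree recursively copies a directory, preserving file modes but not
// ownership or symlinks; enough for a vendor tree of regular files.
func copyTree(src, dst string) error {
	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, path)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}
		in, err := os.Open(path)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, in)
		return err
	})
}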
// SetReference is a command to set the VCS reference (commit id, tag, etc) for
// a project.
func SetReference(conf *cfg.Config, resolveTest bool) error {
	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
		msg.Info("No references set.\n")
		return nil
	}

	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					var loc string
					if dep.Repository != "" {
						loc = dep.Repository
					} else {
						loc = "https://" + dep.Name
					}
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsVersion(dep); err != nil {
						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	if resolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	// close(done)
	// close(in)

	return returnErr
}
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())

	// When there are different physical devices we cannot rename cross device.
	// Fall back to manual copy.
	if err != nil && strings.Contains(err.Error(), "cross-device link") {
		msg.Debug("Cross link err, trying manual copy: %s", err)
		err = gpath.CopyDir(vp, i.VendorPath())
	}

	return err
}