Example #1
// VendoredCleanup cleans up vendored codebases after an update.
//
// This should _only_ be run for installations that do not want VCS repos inside
// of the vendor/ directory.
func VendoredCleanup(conf *cfg.Config) error {
	vend, err := gpath.Vendor()
	if err != nil {
		return err
	}

	for _, dep := range conf.Imports {
		if dep.UpdateAsVendored {
			msg.Info("Cleaning up vendored package %s\n", dep.Name)

			// Remove the VCS directory
			cwd := filepath.Join(vend, dep.Name)
			repo, err := dep.GetRepo(cwd)
			if err != nil {
				msg.Err("Error cleaning up %s:%s", dep.Name, err)
				continue
			}
			t := repo.Vcs()
			err = os.RemoveAll(filepath.Join(cwd, "."+string(t)))
			if err != nil {
				msg.Err("Error cleaning up VCS dir for %s:%s", dep.Name, err)
			}
		}

	}

	return nil
}
Example #2
// MirrorsSet sets a mirror to use
func MirrorsSet(o, r, v string) error {
	if o == "" || r == "" {
		msg.Err("Both the original and mirror values are required")
		return nil
	}

	home := gpath.Home()

	op := filepath.Join(home, "mirrors.yaml")

	var ov *mirrors.Mirrors
	if _, err := os.Stat(op); os.IsNotExist(err) {
		msg.Info("No mirrors.yaml file exists. Creating new one")
		ov = &mirrors.Mirrors{
			Repos: make(mirrors.MirrorRepos, 0),
		}
	} else {
		ov, err = mirrors.ReadMirrorsFile(op)
		if err != nil {
			msg.Die("Error reading existing mirrors.yaml file: %s", err)
		}
	}

	found := false
	for i, re := range ov.Repos {
		if re.Original == o {
			found = true
			msg.Info("%s found in mirrors. Replacing with new settings", o)
			ov.Repos[i].Repo = r
			ov.Repos[i].Vcs = v
		}
	}

	if !found {
		nr := &mirrors.MirrorRepo{
			Original: o,
			Repo:     r,
			Vcs:      v,
		}
		ov.Repos = append(ov.Repos, nr)
	}

	msg.Info("%s being set to %s", o, r)

	err := ov.WriteFile(op)
	if err != nil {
		msg.Err("Error writing mirrors.yaml file: %s", err)
	} else {
		msg.Info("mirrors.yaml written with changes")
	}

	return nil
}
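MirrorsSet follows a common stat-or-create, then modify, then write pattern on mirrors.yaml. Below is a minimal, self-contained sketch of that same pattern; it uses encoding/json and a hypothetical Settings type instead of Glide's mirrors package, and the file name and values are illustrative assumptions only.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// Settings is a stand-in for the structure persisted on disk.
type Settings struct {
	Mirrors map[string]string `json:"mirrors"`
}

func setMirror(home, original, replacement string) error {
	op := filepath.Join(home, "mirrors.json")

	// Load the existing file, or start with an empty structure.
	s := &Settings{Mirrors: map[string]string{}}
	if data, err := os.ReadFile(op); err == nil {
		if err := json.Unmarshal(data, s); err != nil {
			return fmt.Errorf("reading %s: %w", op, err)
		}
	} else if !os.IsNotExist(err) {
		return err
	}

	// Add or replace the entry, then write the whole file back.
	s.Mirrors[original] = replacement
	out, err := json.MarshalIndent(s, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(op, out, 0644)
}

func main() {
	if err := setMirror(os.TempDir(), "github.com/example/pkg", "git.internal/example/pkg"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}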
Example #3
// queueUnseenImports scans a package's imports and adds any new ones to the
// processing queue.
func (r *Resolver) queueUnseen(pkg string, queue *list.List) error {
	// A pkg is marked "seen" as soon as we have inspected it the first time.
	// Seen means that we have added all of its imports to the list.

	// Already queued indicates that we've either already put it into the queue
	// or intentionally not put it in the queue for fatal reasons (e.g. no
	// buildable source).

	deps, err := r.imports(pkg)
	if err != nil && !strings.HasPrefix(err.Error(), "no buildable Go source") {
		msg.Err("Could not find %s: %s", pkg, err)
		return err
		// NOTE: If we uncomment this, we get lots of "no buildable Go source" errors,
		// which don't ever seem to be helpful. They don't actually indicate an error
		// condition, and it's perfectly okay to run into that condition.
		//} else if err != nil {
		//	msg.Warn(err.Error())
	}

	for _, d := range deps {
		if _, ok := r.alreadyQ[d]; !ok {
			r.alreadyQ[d] = true
			queue.PushBack(d)
		}
	}
	return nil
}
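The seen/alreadyQ bookkeeping above is a standard breadth-first traversal guard: each package enters the queue at most once. A minimal, self-contained sketch of that pattern over a plain adjacency map (the map contents and names are illustrative assumptions):

package main

import (
	"container/list"
	"fmt"
)

func main() {
	// imports maps a package to the packages it imports.
	imports := map[string][]string{
		"app":   {"lib/a", "lib/b"},
		"lib/a": {"lib/b"},
		"lib/b": {},
	}

	queue := list.New()
	queued := map[string]bool{"app": true}
	queue.PushBack("app")

	// Walk the queue, appending unseen imports exactly once. Elements
	// pushed to the back are still visited by this loop.
	for e := queue.Front(); e != nil; e = e.Next() {
		pkg := e.Value.(string)
		for _, dep := range imports[pkg] {
			if !queued[dep] {
				queued[dep] = true
				queue.PushBack(dep)
			}
		}
	}

	for e := queue.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value)
	}
}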
Example #4
// EnsureGoVendor ensures that the Go version is correct.
func EnsureGoVendor() {
	// 6l was removed in 1.5, when vendoring was introduced.
	cmd := exec.Command("go", "tool", "6l")
	if _, err := cmd.CombinedOutput(); err == nil {
		msg.Warn("You must install the Go 1.5 or greater toolchain to work with Glide.\n")
		os.Exit(1)
	}

	// This works with 1.5 and >=1.6.
	cmd = exec.Command("go", "env", "GO15VENDOREXPERIMENT")
	if out, err := cmd.CombinedOutput(); err != nil {
		msg.Err("Error looking for $GOVENDOREXPERIMENT: %s.\n", err)
		os.Exit(1)
	} else if strings.TrimSpace(string(out)) != "1" {
		msg.Warn("To use Glide, you must set GO15VENDOREXPERIMENT=1\n")
		os.Exit(1)
	}

	// Verify the setup isn't for the old version of Glide, which assumed the
	// _vendor directory as the GOPATH. Inform the user of the change.
	if _, err := os.Stat("_vendor/"); err == nil {
		msg.Warn(`Your setup appears to be for the previous version of Glide.
Previously, vendor packages were stored in _vendor/src/ and
_vendor was set as your GOPATH. As of Go 1.5 the go tools
recognize the vendor directory as a location for these
files. Glide has embraced this. Please remove the _vendor
directory or move the _vendor/src/ directory to vendor/.` + "\n")
		os.Exit(1)
	}
}
Example #5
// OnGopath will either copy a package, already found in the GOPATH, to the
// vendor/ directory or download it from the internet. Copying from the GOPATH
// only happens when useGopath on the installer is set to true; otherwise the
// package is fetched remotely.
func (m *MissingPackageHandler) OnGopath(pkg string) (bool, error) {
	// If useGopath is false, we fall back to the strategy of fetching from
	// remote.
	if !m.useGopath {
		return m.NotFound(pkg)
	}

	root := util.GetRootFromPackage(pkg)

	// Skip any references to the root package.
	if root == m.Config.Name {
		return false, nil
	}

	msg.Info("Copying package %s from the GOPATH.", pkg)
	dest := filepath.Join(m.destination, pkg)
	// Find package on Gopath
	for _, gp := range gpath.Gopaths() {
		src := filepath.Join(gp, pkg)
		// FIXME: Should probably check if src is a dir or symlink.
		if _, err := os.Stat(src); err == nil {
			if err := os.MkdirAll(dest, os.ModeDir|0755); err != nil {
				return false, err
			}
			if err := gpath.CopyDir(src, dest); err != nil {
				return false, err
			}
			return true, nil
		}
	}

	msg.Err("Could not locate %s on the GOPATH, though it was found before.", pkg)
	return false, nil
}
Example #6
// Get fetches one or more dependencies and installs them.
//
// This includes performing dependency resolution and re-generating the lock file.
func Get(names []string, installer *repo.Installer, insecure, skipRecursive bool) {
	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	// Add the packages to the config.
	if err := addPkgsToConfig(conf, names, insecure); err != nil {
		msg.Die("Failed to get new packages: %s", err)
	}

	// Fetch the new packages. Can't resolve versions via installer.Update if
	// get is called while the vendor/ directory is empty so we checkout
	// everything.
	installer.Checkout(conf, false)

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		// TODO: Can we streamline this in any way? The reason that we update all
		// of the dependencies is that we need to re-negotiate versions. For example,
		// if an existing dependency has the constraint >1.0 and this new package
		// adds the constraint <2.0, then this may re-resolve the existing dependency
		// to be between 1.0 and 2.0. But changing that dependency may then result
		// in that dependency's dependencies changing... so we sorta do the whole
		// thing to be safe.
		err = installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}
	}

	// Set Reference
	if err := repo.SetReference(confcopy); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	// VendoredCleanup
	if installer.UpdateVendored {
		repo.VendoredCleanup(confcopy)
	}

	// Write YAML
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}
	if !skipRecursive {
		// Write lock
		writeLock(conf, confcopy, base)
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}
}
Example #7
// Process imports the dependencies for a package.
func (d *VersionHandler) Process(pkg string) (e error) {
	root := util.GetRootFromPackage(pkg)

	// Skip any references to the root package.
	if root == d.Config.Name {
		return nil
	}

	// We have not tried to import, yet.
	// Should we look in places other than the root of the project?
	if !d.Imported[root] {
		d.Imported[root] = true
		p := filepath.Join(d.Destination, root)
		f, deps, err := importer.Import(p)
		if f && err == nil {
			for _, dep := range deps {

				// The first one wins. Would something smarter than this be better?
				exists, _ := d.Use.Get(dep.Name)
				if exists == nil && (dep.Reference != "" || dep.Repository != "") {
					d.Use.Add(dep.Name, dep, root)
				}
			}
		} else if err != nil {
			msg.Err("Unable to import from %s. Err: %s", root, err)
			e = err
		}
	}

	return
}
Example #8
// MirrorsRemove removes a mirror setting
func MirrorsRemove(k string) error {
	if k == "" {
		msg.Err("The mirror to remove is required")
		return nil
	}

	home := gpath.Home()

	op := filepath.Join(home, "mirrors.yaml")

	if _, err := os.Stat(op); os.IsNotExist(err) {
		msg.Err("mirrors.yaml file not found")
		return nil
	}

	ov, err := mirrors.ReadMirrorsFile(op)
	if err != nil {
		msg.Die("Unable to read mirrors.yaml file: %s", err)
	}

	var nre mirrors.MirrorRepos
	var found bool
	for _, re := range ov.Repos {
		if re.Original != k {
			nre = append(nre, re)
		} else {
			found = true
		}
	}

	if !found {
		msg.Warn("%s was not found in mirrors", k)
	} else {
		msg.Info("%s was removed from mirrors", k)
		ov.Repos = nre

		err = ov.WriteFile(op)
		if err != nil {
			msg.Err("Error writing mirrors.yaml file: %s", err)
		} else {
			msg.Info("mirrors.yaml written with changes")
		}
	}

	return nil
}
Example #9
// EnsureConfig loads and returns a config file.
//
// Any error will cause an immediate exit, with an error printed to Stderr.
func EnsureConfig() *cfg.Config {
	yamlpath, err := gpath.Glide()
	if err != nil {
		msg.ExitCode(2)
		msg.Die("Failed to find %s file in directory tree: %s", gpath.GlideFile, err)
	}

	yml, err := ioutil.ReadFile(yamlpath)
	if err != nil {
		msg.ExitCode(2)
		msg.Die("Failed to load %s: %s", yamlpath, err)
	}
	conf, err := cfg.ConfigFromYaml(yml)
	if err != nil {
		msg.ExitCode(3)
		msg.Die("Failed to parse %s: %s", yamlpath, err)
	}

	b := filepath.Dir(yamlpath)
	buildContext, err := util.GetBuildContext()
	if err != nil {
		msg.Die("Failed to build an import context while ensuring config: %s", err)
	}
	cwd, err := os.Getwd()
	if err != nil {
		msg.Err("Unable to get the current working directory")
	} else {
		// Determining a package name requires a relative path
		b, err = filepath.Rel(b, cwd)
		if err == nil {
			name := buildContext.PackageName(b)
			if name != conf.Name {
				msg.Warn("The name listed in the config file (%s) does not match the current location (%s)", conf.Name, name)
			}
		} else {
			msg.Warn("Problem finding the config file path (%s) relative to the current directory (%s): %s", b, cwd, err)
		}
	}

	err = mirrors.Load()
	if err != nil {
		msg.Err("Unable to load mirrors: %s", err)
	}

	return conf
}
Example #10
// Plugin attempts to find and execute a plugin based on a command.
//
// Exit code 99 means the plugin was never executed. Code 1 means the program
// exited badly.
func Plugin(command string, args []string) {

	cwd, err := os.Getwd()
	if err != nil {
		msg.ExitCode(99)
		msg.Die("Could not get working directory: %s", err)
	}

	cmd := "glide-" + command
	var fullcmd string
	if fullcmd, err = exec.LookPath(cmd); err != nil {
		fullcmd = cwd + "/" + cmd
		if _, err := os.Stat(fullcmd); err != nil {
			msg.ExitCode(99)
			msg.Die("Command %s does not exist.", cmd)
		}
	}

	// Turn the first argument from `glide` into `glide-command`.
	args[0] = cmd
	// Remove the subcommand name from the argument list.
	removed := false
	for i, v := range args {
		if !removed && v == command {
			args = append(args[:i], args[i+1:]...)
			removed = true
		}
	}
	pa := os.ProcAttr{
		Files: []*os.File{os.Stdin, os.Stdout, os.Stderr},
		Dir:   cwd,
	}

	msg.Debug("Delegating to plugin %s (%v)\n", fullcmd, args)

	proc, err := os.StartProcess(fullcmd, args, &pa)
	if err != nil {
		msg.Err("Failed to execute %s: %s", cmd, err)
		os.Exit(98)
	}

	if _, err := proc.Wait(); err != nil {
		msg.Err(err.Error())
		os.Exit(1)
	}
}
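Plugin follows the git-style convention of delegating unknown subcommands to an external glide-<command> binary. The example uses the low-level os.StartProcess; the sketch below shows the same delegation with the higher-level os/exec API, which wires up argument passing and stdio for you. The command name and arguments are illustrative assumptions, not Glide's actual behavior.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runPlugin executes an external "glide-<command>" binary, forwarding the
// remaining arguments and attaching it to the current process's stdio.
func runPlugin(command string, args []string) error {
	name := "glide-" + command
	path, err := exec.LookPath(name)
	if err != nil {
		return fmt.Errorf("plugin %s not found on PATH: %w", name, err)
	}

	cmd := exec.Command(path, args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	if err := runPlugin("example", []string{"--help"}); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}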
Example #11
// SetReference is a command to set the VCS reference (commit id, tag, etc) for
// a project.
func SetReference(conf *cfg.Config, resolveTest bool) error {

	cwd, err := gpath.Vendor()
	if err != nil {
		return err
	}

	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
		msg.Info("No references set.\n")
		return nil
	}

	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					if err := VcsVersion(dep, cwd); err != nil {
						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	if resolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()
	// Close goroutines setting the version
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	// close(done)
	// close(in)

	return nil
}
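SetReference uses the same concurrency shape as several other functions in this collection (ConcurrentUpdate, Export): a fixed pool of goroutines reading work from a channel, a sync.WaitGroup to wait for every item, and a done channel to stop the workers afterwards. A minimal, generic sketch of that shape follows; the worker count and work items are illustrative assumptions.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const workers = 4
	in := make(chan string, workers)
	done := make(chan struct{}, workers)
	var wg sync.WaitGroup

	// Start a fixed pool of workers that drain the in channel.
	for i := 0; i < workers; i++ {
		go func(ch <-chan string) {
			for {
				select {
				case item := <-ch:
					fmt.Println("processing", item)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	// Queue the work, counting each item on the WaitGroup first.
	for _, item := range []string{"a", "b", "c", "d", "e"} {
		wg.Add(1)
		in <- item
	}

	// Wait for all items, then tell every worker to exit.
	wg.Wait()
	for i := 0; i < workers; i++ {
		done <- struct{}{}
	}
}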
Example #12
// EnsureGoVendor ensures that the Go version is correct.
func EnsureGoVendor() {
	// 6l was removed in 1.5, when vendoring was introduced.
	cmd := exec.Command(goExecutable(), "tool", "6l")
	if _, err := cmd.CombinedOutput(); err == nil {
		msg.Warn("You must install the Go 1.5 or greater toolchain to work with Glide.\n")
		os.Exit(1)
	}

	// Check if this is go15, which requires GO15VENDOREXPERIMENT
	// Any release after go15 does not require that env var.
	cmd = exec.Command(goExecutable(), "version")
	if out, err := cmd.CombinedOutput(); err != nil {
		msg.Err("Error getting version: %s.\n", err)
		os.Exit(1)
	} else if strings.HasPrefix(string(out), "go version go1.5") {
		// This works with 1.5 and 1.6.
		cmd = exec.Command(goExecutable(), "env", "GO15VENDOREXPERIMENT")
		if out, err := cmd.CombinedOutput(); err != nil {
			msg.Err("Error looking for $GOVENDOREXPERIMENT: %s.\n", err)
			os.Exit(1)
		} else if strings.TrimSpace(string(out)) != "1" {
			msg.Err("To use Glide, you must set GO15VENDOREXPERIMENT=1")
			os.Exit(1)
		}
	}

	// In the case where vendoring is explicitly disabled, balk.
	if os.Getenv("GO15VENDOREXPERIMENT") == "0" {
		msg.Err("To use Glide, you must set GO15VENDOREXPERIMENT=1")
		os.Exit(1)
	}

	// Verify the setup isn't for the old version of Glide, which assumed the
	// _vendor directory as the GOPATH. Inform the user of the change.
	if _, err := os.Stat("_vendor/"); err == nil {
		msg.Warn(`Your setup appears to be for the previous version of Glide.
Previously, vendor packages were stored in _vendor/src/ and
_vendor was set as your GOPATH. As of Go 1.5 the go tools
recognize the vendor directory as a location for these
files. Glide has embraced this. Please remove the _vendor
directory or move the _vendor/src/ directory to vendor/.` + "\n")
		os.Exit(1)
	}
}
Example #13
File: glide.go Project: akutz/glide
func main() {
	app := cli.NewApp()
	app.Name = "glide"
	app.Usage = usage
	app.Version = version
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:  "yaml, y",
			Value: "glide.yaml",
			Usage: "Set a YAML configuration file.",
		},
		cli.BoolFlag{
			Name:  "quiet, q",
			Usage: "Quiet (no info or debug messages)",
		},
		cli.BoolFlag{
			Name:  "debug",
			Usage: "Print debug verbose informational messages",
		},
		cli.StringFlag{
			Name:   "home",
			Value:  gpath.Home(),
			Usage:  "The location of Glide files",
			EnvVar: "GLIDE_HOME",
		},
		cli.StringFlag{
			Name:   "tmp",
			Value:  "",
			Usage:  "The temp directory to use. Defaults to the system's temp directory",
			EnvVar: "GLIDE_TMP",
		},
		cli.BoolFlag{
			Name:  "no-color",
			Usage: "Turn off colored output for log messages",
		},
	}
	app.CommandNotFound = func(c *cli.Context, command string) {
		// TODO: Set some useful env vars.
		action.Plugin(command, os.Args)
	}
	app.Before = startup
	app.After = shutdown
	app.Commands = commands()

	// Detect errors from the Before and After calls and exit on them.
	if err := app.Run(os.Args); err != nil {
		msg.Err(err.Error())
		os.Exit(1)
	}

	// If there was an error message, exit non-zero.
	if msg.HasErrored() {
		m := msg.Color(msg.Red, "An Error has occurred")
		msg.Msg(m)
		os.Exit(2)
	}
}
Example #14
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer, stripVendor bool) {
	cache.SystemLock()

	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false, stripVendor)
		return
	}
	// Load lockfile
	lock, err := cfg.ReadLockFile(filepath.Join(base, gpath.LockFile))
	if err != nil {
		msg.Die("Could not load lockfile.")
	}
	// Verify lockfile hasn't changed
	hash, err := conf.Hash()
	if err != nil {
		msg.Die("Could not load lockfile.")
	} else if hash != lock.Hash {
		fmt.Println(hash, lock.Hash)
		foo, _ := conf.Marshal()
		fmt.Println(string(foo))
		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf, installer.ResolveTest); err != nil {
		msg.Die("Failed to set references: %s (Skip to cleanup)", err)
	}

	err = installer.Export(newConf)
	if err != nil {
		msg.Die("Unable to export dependencies to vendor directory: %s", err)
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
Example #15
File: tree.go Project: albrow/glide
func walkDeps(b *util.BuildCtxt, base, myName string) []string {
	externalDeps := []string{}
	filepath.Walk(base, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if !dependency.IsSrcDir(fi) {
			if fi.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}

		var imps []string
		pkg, err := b.ImportDir(path, 0)
		if err != nil && strings.HasPrefix(err.Error(), "found packages ") {
			// If we got here it's because the directory declares multiple
			// packages. This is often because of an example with a package
			// main guarded by a "+build ignore" tag. In that case we try to
			// brute force the packages with a slower scan.
			imps, _, err = dependency.IterativeScan(path)
			if err != nil {
				msg.Err("Error walking dependencies for %s: %s", path, err)
				return err
			}
		} else if err != nil {
			if !strings.HasPrefix(err.Error(), "no buildable Go source") {
				msg.Warn("Error: %s (%s)", err, path)
				// Not sure if we should return here.
				//return err
			}
		} else {
			imps = pkg.Imports
		}

		if pkg.Goroot {
			return nil
		}

		for _, imp := range imps {
			//if strings.HasPrefix(imp, myName) {
			////Info("Skipping %s because it is a subpackage of %s", imp, myName)
			//continue
			//}
			if imp == myName {
				continue
			}
			externalDeps = append(externalDeps, imp)
		}

		return nil
	})
	return externalDeps
}
Example #16
// ConcurrentUpdate takes a list of dependencies and updates them in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsUpdate(dep, i.Force, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
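Because several workers can fail at once, ConcurrentUpdate collects errors behind a mutex and folds them together with cli.NewMultiError. The sketch below isolates that aggregation step, substituting errors.Join from the standard library (Go 1.20+) for the cli helper; that substitution and the fabricated worker errors are assumptions for illustration only.

package main

import (
	"errors"
	"fmt"
	"sync"
)

func main() {
	var (
		mu        sync.Mutex
		returnErr error
		wg        sync.WaitGroup
	)

	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			err := fmt.Errorf("worker %d failed", i)
			// Fold this worker's error into the shared result while keeping
			// concurrent writers from stepping on each other.
			mu.Lock()
			returnErr = errors.Join(returnErr, err)
			mu.Unlock()
		}(i)
	}

	wg.Wait()
	fmt.Println(returnErr)
}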
Example #17
// NoVendor generates a list of source code directories, excepting `vendor/`.
//
// If "onlyGo" is true, only folders that have Go code in them will be returned.
//
// If suffix is true, this will append `/...` to every directory.
func NoVendor(path string, onlyGo, suffix bool) {
	// This is responsible for printing the results of noVend.
	paths, err := noVend(path, onlyGo, suffix)
	if err != nil {
		msg.Err("Failed to walk file tree: %s", err)
		msg.Warn("FIXME: NoVendor should exit with non-zero exit code.")
		return
	}

	for _, p := range paths {
		msg.Puts(p)
	}
}
Example #18
// ConcurrentUpdate takes a list of dependencies and updates them in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	msg.Info("Downloading dependencies. Please wait...")

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					dest := filepath.Join(i.VendorPath(), dep.Name)
					if err := VcsUpdate(dep, dest, i.Home, i.UseCache, i.UseCacheGopath, i.UseGopath, i.Force, i.UpdateVendored); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		wg.Add(1)
		in <- dep
	}

	wg.Wait()

	// Close goroutines setting the version
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
Example #19
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer) {
	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false)
		return
	}
	// Load lockfile
	lock, err := LoadLockfile(base, conf)
	if err != nil {
		msg.Die("Could not load lockfile.")
	}

	// Delete unused packages
	if installer.DeleteUnused {
		// It's unclear whether this should operate off of the lock, or off
		// of the glide.yaml file. I'd think that doing this based on the
		// lock would be much more reliable.
		dependency.DeleteUnused(conf)
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf); err != nil {
		msg.Err("Failed to set references: %s (Skip to cleanup)", err)
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	if installer.UpdateVendored {
		repo.VendoredCleanup(newConf)
	}
}
Example #20
// EnsureGopath fails if GOPATH is not set, or if no $GOPATH/src directory
// exists.
//
// Otherwise it returns the first GOPATH entry that contains a src directory.
func EnsureGopath() string {
	gps := gpath.Gopaths()
	if len(gps) == 0 {
		msg.Die("$GOPATH is not set.")
	}

	for _, gp := range gps {
		_, err := os.Stat(path.Join(gp, "src"))
		if err != nil {
			msg.Warn("%s", err)
			continue
		}
		return gp
	}

	msg.Err("Could not find any of %s/src.\n", strings.Join(gps, "/src, "))
	msg.Info("As of Glide 0.5/Go 1.5, this is required.\n")
	msg.Die("Wihtout src, cannot continue.")
	return ""
}
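EnsureGopath relies on gpath.Gopaths() to enumerate GOPATH entries. GOPATH can hold several directories separated by the OS list separator, so a plausible implementation of that helper looks roughly like the sketch below; the helper name and its behavior are assumptions, not Glide's actual code.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// gopaths splits $GOPATH into its individual entries.
func gopaths() []string {
	return filepath.SplitList(os.Getenv("GOPATH"))
}

func main() {
	for _, gp := range gopaths() {
		// Report whether each entry actually has a src directory.
		if _, err := os.Stat(filepath.Join(gp, "src")); err == nil {
			fmt.Println("usable GOPATH entry:", gp)
		} else {
			fmt.Println("skipping", gp, ":", err)
		}
	}
}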
Example #21
// Remove removes a dependency from the configuration.
func Remove(packages []string, inst *repo.Installer) {
	cache.SystemLock()
	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	msg.Info("Preparing to remove %d packages.", len(packages))
	conf.Imports = rmDeps(packages, conf.Imports)
	conf.DevImports = rmDeps(packages, conf.DevImports)

	// Copy used to generate locks.
	confcopy := conf.Clone()

	//confcopy.Imports = inst.List(confcopy)

	if err := repo.SetReference(confcopy, inst.ResolveTest); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	err = inst.Export(confcopy)
	if err != nil {
		msg.Die("Unable to export dependencies to vendor directory: %s", err)
	}

	// Write glide.yaml
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	// Write glide lock
	writeLock(conf, confcopy, base)
}
Example #22
// Remove removes a dependency from the configuration.
func Remove(packages []string, inst *repo.Installer) {
	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	msg.Info("Preparing to remove %d packages.", len(packages))
	conf.Imports = rmDeps(packages, conf.Imports)
	conf.DevImports = rmDeps(packages, conf.DevImports)

	// Copy used to generate locks.
	confcopy := conf.Clone()

	confcopy.Imports = inst.List(confcopy)

	if err := repo.SetReference(confcopy, inst.ResolveTest); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	// TODO: Right now, there is no flag to enable this, so this will never be
	// run. I am not sure whether we should allow this in a rm op or not.
	if inst.UpdateVendored {
		repo.VendoredCleanup(confcopy)
	}

	// Write glide.yaml
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	// Write glide lock
	writeLock(conf, confcopy, base)
}
Example #23
// imports gets all of the imports for a given package.
//
// If the package is in GOROOT, this will return an empty list (but not
// an error).
// If it cannot resolve the pkg, it will return an error.
func (r *Resolver) imports(pkg string) ([]string, error) {

	if r.Config.HasIgnore(pkg) {
		msg.Debug("Ignoring %s", pkg)
		return []string{}, nil
	}

	// If this pkg is marked seen, we don't scan it again.
	if _, ok := r.seen[pkg]; ok {
		msg.Debug("Already saw %s", pkg)
		return []string{}, nil
	}

	// FIXME: On error this should try NotFound on the dependency, and then
	// import it again.
	p, err := r.BuildContext.ImportDir(pkg, 0)
	if err != nil {
		return []string{}, err
	}

	// It is okay to scan a package more than once. In some cases, this is
	// desirable because the package can change between scans (e.g. as a result
	// of a failed scan resolving the situation).
	msg.Debug("=> Scanning %s (%s)", p.ImportPath, pkg)
	r.seen[pkg] = true

	// Optimization: If it's in GOROOT, it has no imports worth scanning.
	if p.Goroot {
		return []string{}, nil
	}

	// We are only looking for dependencies in vendor. No root, cgo, etc.
	buf := []string{}
	for _, imp := range p.Imports {
		if r.Config.HasIgnore(imp) {
			msg.Debug("Ignoring %s", imp)
			continue
		}
		info := r.FindPkg(imp)
		switch info.Loc {
		case LocUnknown:
			// Do we resolve here?
			found, err := r.Handler.NotFound(imp)
			if err != nil {
				msg.Err("Failed to fetch %s: %s", imp, err)
			}
			if found {
				buf = append(buf, filepath.Join(r.VendorDir, filepath.FromSlash(imp)))
				r.VersionHandler.SetVersion(imp)
				continue
			}
			r.seen[info.Path] = true
		case LocVendor:
			//msg.Debug("Vendored: %s", imp)
			buf = append(buf, info.Path)
			if err := r.Handler.InVendor(imp); err == nil {
				r.VersionHandler.SetVersion(imp)
			} else {
				msg.Warn("Error updating %s: %s", imp, err)
			}
		case LocGopath:
			found, err := r.Handler.OnGopath(imp)
			if err != nil {
				msg.Err("Failed to fetch %s: %s", imp, err)
			}
			// If the Handler marks this as found, we drop it into the buffer
			// for subsequent processing. Otherwise, we assume that we're
			// in a less-than-perfect, but functional, situation.
			if found {
				buf = append(buf, filepath.Join(r.VendorDir, filepath.FromSlash(imp)))
				r.VersionHandler.SetVersion(imp)
				continue
			}
			msg.Warn("Package %s is on GOPATH, but not vendored. Ignoring.", imp)
			r.seen[info.Path] = true
		default:
			// Local packages are an odd case. CGO cannot be scanned.
			msg.Debug("===> Skipping %s", imp)
		}
	}

	return buf, nil
}
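The resolver above leans on go/build's ImportDir, which parses a directory and reports its import list plus whether it lives in GOROOT. A minimal, self-contained sketch of that call against the current directory (the directory choice is an illustrative assumption):

package main

import (
	"fmt"
	"go/build"
	"log"
)

func main() {
	// Import the package in the current directory using the default context.
	pkg, err := build.Default.ImportDir(".", 0)
	if err != nil {
		log.Fatalf("could not import directory: %s", err)
	}

	fmt.Println("import path:", pkg.ImportPath)
	fmt.Println("in GOROOT:  ", pkg.Goroot)
	for _, imp := range pkg.Imports {
		fmt.Println("imports:", imp)
	}
}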
Example #24
// ConfigWizard reads configuration from a glide.yaml file and attempts to suggest
// improvements. The wizard is interactive.
func ConfigWizard(base string) {
	_, err := gpath.Glide()
	glidefile := gpath.GlideFile
	if err != nil {
		msg.Info("Unable to find a glide.yaml file. Would you like to create one now? Yes (Y) or No (N)")
		bres := msg.PromptUntilYorN()
		if bres {
			// Guess deps
			conf := guessDeps(base, false)
			// Write YAML
			if err := conf.WriteFile(glidefile); err != nil {
				msg.Die("Could not save %s: %s", glidefile, err)
			}
		} else {
			msg.Err("Unable to find configuration file. Please create configuration information to continue.")
		}
	}

	conf := EnsureConfig()

	err = cache.Setup()
	if err != nil {
		msg.Die("Problem setting up cache: %s", err)
	}

	msg.Info("Looking for dependencies to make suggestions on")
	msg.Info("--> Scanning for dependencies not using version ranges")
	msg.Info("--> Scanning for dependencies using commit ids")
	var deps []*cfg.Dependency
	for _, dep := range conf.Imports {
		if wizardLookInto(dep) {
			deps = append(deps, dep)
		}
	}
	for _, dep := range conf.DevImports {
		if wizardLookInto(dep) {
			deps = append(deps, dep)
		}
	}

	msg.Info("Gathering information on each dependency")
	msg.Info("--> This may take a moment. Especially on a codebase with many dependencies")
	msg.Info("--> Gathering release information for dependencies")
	msg.Info("--> Looking for dependency imports where versions are commit ids")
	for _, dep := range deps {
		wizardFindVersions(dep)
	}

	var changes int
	for _, dep := range deps {
		var remote string
		if dep.Repository != "" {
			remote = dep.Repository
		} else {
			remote = "https://" + dep.Name
		}

		// First check: ask if the tag should be used instead of the commit id.
		cur := cache.MemCurrent(remote)
		if cur != "" && cur != dep.Reference {
			wizardSugOnce()
			var dres bool
			asked, use, val := wizardOnce("current")
			if !use {
				dres = wizardAskCurrent(cur, dep)
			}
			if !asked {
				as := wizardRemember()
				wizardSetOnce("current", as, dres)
			}

			if asked && use {
				dres = val.(bool)
			}

			if dres {
				msg.Info("Updating %s to use the tag %s instead of commit id %s", dep.Name, cur, dep.Reference)
				dep.Reference = cur
				changes++
			}
		}

		// Second check: if no version is being used and there's a semver release, ask about the latest.
		memlatest := cache.MemLatest(remote)
		if dep.Reference == "" && memlatest != "" {
			wizardSugOnce()
			var dres bool
			asked, use, val := wizardOnce("latest")
			if !use {
				dres = wizardAskLatest(memlatest, dep)
			}
			if !asked {
				as := wizardRemember()
				wizardSetOnce("latest", as, dres)
			}

			if asked && use {
				dres = val.(bool)
			}

			if dres {
				msg.Info("Updating %s to use the release %s instead of no release", dep.Name, memlatest)
				dep.Reference = memlatest
				changes++
			}
		}

		// Third check: if the version is semver, offer to use a range instead.
		sv, err := semver.NewVersion(dep.Reference)
		if err == nil {
			wizardSugOnce()
			var res string
			asked, use, val := wizardOnce("range")
			if !use {
				res = wizardAskRange(sv, dep)
			}
			if !asked {
				as := wizardRemember()
				wizardSetOnce("range", as, res)
			}

			if asked && use {
				res = val.(string)
			}

			if res == "m" {
				r := "^" + sv.String()
				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Reference)
				dep.Reference = r
				changes++
			} else if res == "p" {
				r := "~" + sv.String()
				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Reference)
				dep.Reference = r
				changes++
			}
		}
	}

	if changes > 0 {
		msg.Info("Configuration changes have been made. Would you like to write these")
		msg.Info("changes to your configuration file? Yes (Y) or No (N)")
		dres := msg.PromptUntilYorN()
		if dres {
			msg.Info("Writing updates to configuration file (%s)", glidefile)
			if err := conf.WriteFile(glidefile); err != nil {
				msg.Die("Could not save %s: %s", glidefile, err)
			}
			msg.Info("You can now edit the glide.yaml file.:")
			msg.Info("--> For more information on versions and ranges see https://glide.sh/docs/versions/")
			msg.Info("--> For details on additional metadata see https://glide.sh/docs/glide.yaml/")
		} else {
			msg.Warn("Change not written to configuration file")
		}
	} else {
		msg.Info("No proposed changes found. Have a nice day.")
	}
}
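The wizard offers to replace an exact version with a caret (^) or tilde (~) range. With the Masterminds semver library that the identifiers above suggest Glide uses, those ranges can be checked directly; below is a small sketch. The specific versions are illustrative, and the v1 import path is an assumption on my part.

package main

import (
	"fmt"
	"log"

	"github.com/Masterminds/semver"
)

func main() {
	v, err := semver.NewVersion("1.2.7")
	if err != nil {
		log.Fatal(err)
	}

	// ^1.2.3 allows any 1.x.y at or above 1.2.3; ~1.2.3 only allows 1.2.x.
	for _, rng := range []string{"^1.2.3", "~1.2.3"} {
		c, err := semver.NewConstraint(rng)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s matches %s: %v\n", rng, v, c.Check(v))
	}
}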
Example #25
// Get fetches one or more dependencies and installs them.
//
// This includes performing dependency resolution and re-generating the lock file.
func Get(names []string, installer *repo.Installer, insecure, skipRecursive, strip, stripVendor, nonInteract bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	// Add the packages to the config.
	if count, err2 := addPkgsToConfig(conf, names, insecure, nonInteract); err2 != nil {
		msg.Die("Failed to get new packages: %s", err2)
	} else if count == 0 {
		msg.Warn("Nothing to do")
		return
	}

	// Fetch the new packages. Can't resolve versions via installer.Update if
	// get is called while the vendor/ directory is empty so we checkout
	// everything.
	err = installer.Checkout(conf, false)
	if err != nil {
		msg.Die("Failed to checkout packages: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		// TODO: Can we streamline this in any way? The reason that we update all
		// of the dependencies is that we need to re-negotiate versions. For example,
		// if an existing dependency has the constraint >1.0 and this new package
		// adds the constraint <2.0, then this may re-resolve the existing dependency
		// to be between 1.0 and 2.0. But changing that dependency may then result
		// in that dependency's dependencies changing... so we sorta do the whole
		// thing to be safe.
		err = installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}
	}

	// Set Reference
	if err := repo.SetReference(confcopy); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	// VendoredCleanup
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(confcopy)
	}

	// Write YAML
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}
	if !skipRecursive {
		// Write lock
		if stripVendor {
			confcopy = godep.RemoveGodepSubpackages(confcopy)
		}
		writeLock(conf, confcopy, base)
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
Example #26
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	if err = os.MkdirAll(vp, 0755); err != nil {
		return err
	}

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)

					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Signal the export goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())

	if err != nil {
		// When the source and destination are on different physical devices
		// we cannot rename across them. Instead we copy.
		switch terr := err.(type) {
		case *os.LinkError:
			// syscall.EXDEV is the common name for the cross device link error
			// which has varying output text across different operating systems.
			if terr.Err == syscall.EXDEV {
				msg.Debug("Cross link err, trying manual copy: %s", err)
				return gpath.CopyDir(vp, i.VendorPath())
			} else if runtime.GOOS == "windows" {
				// In windows it can drop down to an operating system call that
				// returns an operating system error with a different number and
				// message. Checking for that as a fall back.
				noerr, ok := terr.Err.(syscall.Errno)
				// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
				// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
				if ok && noerr == 0x11 {
					msg.Debug("Cross link err on Windows, trying manual copy: %s", err)
					return gpath.CopyDir(vp, i.VendorPath())
				}
			}
		}
	}

	return err

}
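The tail of Export handles the case where the temporary directory and vendor/ live on different filesystems, so os.Rename fails with EXDEV and a copy is required. The sketch below isolates that detection; copyDir is a hypothetical stand-in for gpath.CopyDir, and the paths in main are illustrative assumptions.

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// moveDir renames src to dst, falling back to a copy when the rename
// crosses filesystems (EXDEV).
func moveDir(src, dst string, copyDir func(src, dst string) error) error {
	err := os.Rename(src, dst)
	if err == nil {
		return nil
	}

	var linkErr *os.LinkError
	if errors.As(err, &linkErr) && linkErr.Err == syscall.EXDEV {
		// Different physical devices: copy instead of renaming.
		return copyDir(src, dst)
	}
	return err
}

func main() {
	src, err := os.MkdirTemp("", "vendor-tmp")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	dst := filepath.Join(os.TempDir(), "vendor-moved")
	copyDir := func(src, dst string) error {
		fmt.Println("would copy", src, "to", dst)
		return nil
	}
	if err := moveDir(src, dst, copyDir); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}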
Example #27
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer, strip, stripVendor bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false, strip, stripVendor)
		return
	}
	// Load lockfile
	lock, err := LoadLockfile(base, conf)
	if err != nil {
		msg.Die("Could not load lockfile.")
	}

	// Delete unused packages
	if installer.DeleteUnused {
		// It's unclear whether this should operate off of the lock, or off
		// of the glide.yaml file. I'd think that doing this based on the
		// lock would be much more reliable.
		dependency.DeleteUnused(conf)
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf); err != nil {
		msg.Err("Failed to set references: %s (Skip to cleanup)", err)
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(newConf)
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
Example #28
File: vcs.go Project: heewa/glide
// VcsUpdate updates to a particular checkout based on the VCS setting.
func VcsUpdate(dep *cfg.Dependency, dest, home string, cache, cacheGopath, useGopath, force, updateVendored bool, updated *UpdateTracker) error {

	// If the dependency has already been pinned we can skip it. This is a
	// faster path so we don't need to resolve it again.
	if dep.Pin != "" {
		msg.Debug("Dependency %s has already been pinned. Fetching updates skipped.", dep.Name)
		return nil
	}

	if updated.Check(dep.Name) {
		msg.Debug("%s was already updated, skipping.", dep.Name)
		return nil
	}
	updated.Add(dep.Name)

	msg.Info("--> Fetching updates for %s.", dep.Name)

	if filterArchOs(dep) {
		msg.Info("%s is not used for %s/%s.\n", dep.Name, runtime.GOOS, runtime.GOARCH)
		return nil
	}

	// If destination doesn't exist we need to perform an initial checkout.
	if _, err := os.Stat(dest); os.IsNotExist(err) {
		if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
			msg.Warn("Unable to checkout %s\n", dep.Name)
			return err
		}
	} else {
		// At this point we have a directory for the package.

		// When the directory is not empty and has no VCS directory it's
		// a vendored files situation.
		empty, err := gpath.IsDirectoryEmpty(dest)
		if err != nil {
			return err
		}
		_, err = v.DetectVcsFromFS(dest)
		if !updateVendored && !empty && err == v.ErrCannotDetectVCS {
			msg.Warn("%s appears to be a vendored package. Unable to update. Consider the '--update-vendored' flag.\n", dep.Name)
		} else if !updateVendored && empty && err == v.ErrCannotDetectVCS {
			msg.Warn("%s is an empty directory. Fetching a new copy of the dependency.", dep.Name)
			msg.Debug("Removing empty directory %s", dest)
			err := os.RemoveAll(dest)
			if err != nil {
				return err
			}
			if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
				msg.Warn("Unable to checkout %s\n", dep.Name)
				return err
			}
		} else {

			if updateVendored && !empty && err == v.ErrCannotDetectVCS {
				// A vendored package, no repo, and updating the vendored packages
				// has been opted into.
				msg.Info("%s is a vendored package. Updating.", dep.Name)
				err = os.RemoveAll(dest)
				if err != nil {
					msg.Err("Unable to update vendored dependency %s.\n", dep.Name)
					return err
				}
				dep.UpdateAsVendored = true

				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
					msg.Warn("Unable to checkout %s\n", dep.Name)
					return err
				}

				return nil
			}

			repo, err := dep.GetRepo(dest)

			// Tried to checkout a repo to a path that does not work. Either the
			// type or endpoint has changed. Force is being passed in so the old
			// location can be removed and replaced with the new one.
			// Warning, any changes in the old location will be deleted.
			// TODO: Put dirty checking in on the existing local checkout.
			if (err == v.ErrWrongVCS || err == v.ErrWrongRemote) && force {
				var newRemote string
				if len(dep.Repository) > 0 {
					newRemote = dep.Repository
				} else {
					newRemote = "https://" + dep.Name
				}

				msg.Warn("Replacing %s with contents from %s\n", dep.Name, newRemote)
				rerr := os.RemoveAll(dest)
				if rerr != nil {
					return rerr
				}
				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
					msg.Warn("Unable to checkout %s\n", dep.Name)
					return err
				}

				repo, err = dep.GetRepo(dest)
				if err != nil {
					return err
				}
			} else if err != nil {
				return err
			} else if repo.IsDirty() {
				return fmt.Errorf("%s contains uncommitted changes. Skipping update", dep.Name)
			}

			// Check if the current version is a tag or commit id. If it is
			// and that version is already checked out we can skip updating
			// which is faster than going out to the Internet to perform
			// an update.
			if dep.Reference != "" {
				version, err := repo.Version()
				if err != nil {
					return err
				}
				ib, err := isBranch(dep.Reference, repo)
				if err != nil {
					return err
				}

				// If the current version equals the ref and it's not a
				// branch it's a tag or commit id so we can skip
				// performing an update.
				if version == dep.Reference && !ib {
					msg.Debug("%s is already set to version %s. Skipping update.", dep.Name, dep.Reference)
					return nil
				}
			}

			if err := repo.Update(); err != nil {
				msg.Warn("Download failed.\n")
				return err
			}
		}
	}

	return nil
}
Example #29
// Update updates repos and the lock file from the main glide yaml.
func Update(installer *repo.Installer, skipRecursive, strip, stripVendor bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := "."
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Delete unused packages
	if installer.DeleteUnused {
		dependency.DeleteUnused(conf)
	}

	// Try to check out the initial dependencies.
	if err := installer.Checkout(conf, false); err != nil {
		msg.Die("Failed to do initial checkout of config: %s", err)
	}

	// Set the versions for the initial dependencies so that resolved dependencies
	// are rooted in the correct version of the base.
	if err := repo.SetReference(conf); err != nil {
		msg.Die("Failed to set initial config references: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		err := installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}

		// TODO: There is no support here for importing Godeps, GPM, and GB files.
		// I think that all we really need to do now is hunt for these files, and then
		// roll their version numbers into the config file.

		// Set references. There may be no remaining references to set since the
		// installer set them as it went to make sure it parsed the right imports
		// from the right version of the package.
		msg.Info("Setting references for remaining imports")
		if err := repo.SetReference(confcopy); err != nil {
			msg.Err("Failed to set references: %s (Skip to cleanup)", err)
		}
	}
	// Vendored cleanup
	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(confcopy)
	}

	// Write glide.yaml (Why? Godeps/GPM/GB?)
	// I think we don't need to write a new Glide file because update should not
	// change anything important. It will just generate information about
	// transitive dependencies, all of which belong exclusively in the lock
	// file, not the glide.yaml file.
	// TODO(mattfarina): Detect when a new dependency has been added or removed
	// from the project. A removed dependency should warn and an added dependency
	// should be added to the glide.yaml file. See issue #193.

	if stripVendor {
		confcopy = godep.RemoveGodepSubpackages(confcopy)
	}

	if !skipRecursive {
		// Write lock
		hash, err := conf.Hash()
		if err != nil {
			msg.Die("Failed to generate config hash. Unable to generate lock file.")
		}
		lock := cfg.NewLockfile(confcopy.Imports, hash)
		wl := true
		if gpath.HasLock(base) {
			yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
			if err == nil {
				l2, err := cfg.LockfileFromYaml(yml)
				if err == nil {
					f1, err := l2.Fingerprint()
					f2, err2 := lock.Fingerprint()
					if err == nil && err2 == nil && f1 == f2 {
						wl = false
					}
				}
			}
		}
		if wl {
			if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
				msg.Err("Could not write lock file to %s: %s", base, err)
				return
			}
		} else {
			msg.Info("Versions did not change. Skipping glide.lock update.")
		}

		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
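Update only rewrites glide.lock when the lock fingerprints differ, which keeps the file stable across no-op updates. Below is a generic sketch of that write-only-if-changed idea using a SHA-256 fingerprint of the serialized content; the file name and content are illustrative assumptions, not Glide's lockfile format.

package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

// writeIfChanged writes content to path only when its fingerprint differs
// from what is already on disk.
func writeIfChanged(path string, content []byte) (bool, error) {
	if existing, err := os.ReadFile(path); err == nil {
		if sha256.Sum256(existing) == sha256.Sum256(content) {
			return false, nil // fingerprints match, skip the write
		}
	}
	return true, os.WriteFile(path, content, 0644)
}

func main() {
	wrote, err := writeIfChanged("example.lock", []byte("hash: abc123\n"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("wrote file:", wrote)
}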
Example #30
// resolveList takes a list and resolves it.
//
// This walks the entire file tree for the given dependencies, not just the
// parts that are imported directly. Using this will discover dependencies
// regardless of OS and architecture.
func (r *Resolver) resolveList(queue *list.List) ([]string, error) {

	var failedDep string
	for e := queue.Front(); e != nil; e = e.Next() {
		dep := e.Value.(string)
		t := strings.TrimPrefix(dep, r.VendorDir+string(os.PathSeparator))
		if r.Config.HasIgnore(t) {
			msg.Info("Ignoring: %s", t)
			continue
		}
		r.VersionHandler.Process(t)
		//msg.Warn("#### %s ####", dep)
		//msg.Info("Seen Count: %d", len(r.seen))
		// Catch the outermost dependency.
		failedDep = dep
		err := filepath.Walk(dep, func(path string, fi os.FileInfo, err error) error {
			if err != nil && err != filepath.SkipDir {
				return err
			}

			// Skip files.
			if !fi.IsDir() {
				return nil
			}
			// Skip dirs that are not source.
			if !srcDir(fi) {
				//msg.Debug("Skip resource %s", fi.Name())
				return filepath.SkipDir
			}

			// Anything that comes through here has already been through
			// the queue.
			r.alreadyQ[path] = true
			e := r.queueUnseen(path, queue)
			if e != nil {
				failedDep = path
				//msg.Err("Failed to fetch dependency %s: %s", path, e)
			}
			return e
		})
		if err != nil && err != filepath.SkipDir {
			msg.Err("Dependency %s failed to resolve: %s.", failedDep, err)
			return []string{}, err
		}
	}

	res := make([]string, 0, queue.Len())

	// In addition to generating a list of resolved paths, record each
	// discovered root and subpackage in the configuration.
	for e := queue.Front(); e != nil; e = e.Next() {
		t := strings.TrimPrefix(e.Value.(string), r.VendorDir+string(os.PathSeparator))
		root, sp := util.NormalizeName(t)

		// TODO(mattfarina): Need to eventually support devImport
		existing := r.Config.Imports.Get(root)
		if existing != nil {
			if sp != "" && !existing.HasSubpackage(sp) {
				existing.Subpackages = append(existing.Subpackages, sp)
			}
		} else {
			newDep := &cfg.Dependency{
				Name: root,
			}
			if sp != "" {
				newDep.Subpackages = []string{sp}
			}

			r.Config.Imports = append(r.Config.Imports, newDep)
		}
		res = append(res, e.Value.(string))
	}

	return res, nil
}