// Get fetches one or more dependencies and installs them.
//
// This includes resolving dependencies and re-generating the lock file.
func Get(names []string, installer *repo.Installer, insecure, skipRecursive bool) {
	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	// Add the packages to the config.
	if err := addPkgsToConfig(conf, names, insecure); err != nil {
		msg.Die("Failed to get new packages: %s", err)
	}

	// Fetch the new packages. Can't resolve versions via installer.Update if
	// get is called while the vendor/ directory is empty so we checkout
	// everything.
	installer.Checkout(conf, false)

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		// TODO: Can we streamline this in any way? The reason that we update all
		// of the dependencies is that we need to re-negotiate versions. For example,
		// if an existing dependency has the constraint >1.0 and this new package
		// adds the constraint <2.0, then this may re-resolve the existing dependency
		// to be between 1.0 and 2.0. But changing that dependency may then result
		// in that dependency's dependencies changing... so we sorta do the whole
		// thing to be safe.
		err = installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}
	}

	// Set Reference
	if err := repo.SetReference(confcopy); err != nil {
		msg.Error("Failed to set references: %s", err)
	}

	// VendoredCleanup
	if installer.UpdateVendored {
		repo.VendoredCleanup(confcopy)
	}

	// Write YAML
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	if !skipRecursive {
		// Write lock
		writeLock(conf, confcopy, base)
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}
}
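// writeLock is called by the Get and Remove revisions in this section but its
// definition is not shown here. The following is a minimal sketch, not the
// canonical implementation: it assumes the two-argument cfg.NewLockfile form
// used by the first Update revision below (later revisions also pass
// DevImports and return an error) and writes the result to glide.lock in the
// project base.
func writeLock(conf, confcopy *cfg.Config, base string) {
	// Hash glide.yaml so Install can later detect when the lock is stale.
	hash, err := conf.Hash()
	if err != nil {
		msg.Die("Failed to generate config hash. Unable to generate lock file.")
	}
	lock := cfg.NewLockfile(confcopy.Imports, hash)
	if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
		msg.Die("Could not write lock file to %s: %s", base, err)
	}
}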
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer, stripVendor bool) {
	cache.SystemLock()

	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false, stripVendor)
		return
	}

	// Load lockfile
	lock, err := cfg.ReadLockFile(filepath.Join(base, gpath.LockFile))
	if err != nil {
		msg.Die("Could not load lockfile.")
	}

	// Verify lockfile hasn't changed
	hash, err := conf.Hash()
	if err != nil {
		msg.Die("Could not generate checksum of glide.yaml: %s", err)
	} else if hash != lock.Hash {
		// Debug output left in place: print both hashes and the marshaled
		// config to help diagnose the mismatch.
		fmt.Println(hash, lock.Hash)
		foo, _ := conf.Marshal()
		fmt.Println(string(foo))
		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf, installer.ResolveTest); err != nil {
		msg.Die("Failed to set references: %s (Skip to cleanup)", err)
	}

	err = installer.Export(newConf)
	if err != nil {
		msg.Die("Unable to export dependencies to vendor directory: %s", err)
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer) {
	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false)
		return
	}

	// Load lockfile
	lock, err := LoadLockfile(base, conf)
	if err != nil {
		msg.Die("Could not load lockfile.")
	}

	// Delete unused packages
	if installer.DeleteUnused {
		// It's unclear whether this should operate off of the lock, or off
		// of the glide.yaml file. I'd think that doing this based on the
		// lock would be much more reliable.
		dependency.DeleteUnused(conf)
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf); err != nil {
		msg.Error("Failed to set references: %s (Skip to cleanup)", err)
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	if installer.UpdateVendored {
		repo.VendoredCleanup(newConf)
	}
}
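// LoadLockfile is called by the Install revisions in this section but not
// shown here. A sketch under the same assumptions as the surrounding code:
// it reads glide.lock, parses it with cfg.LockfileFromYaml (the same call the
// later Update revisions use), and reuses the hash-mismatch warning that the
// first Install revision emits inline. The real helper may differ in detail.
func LoadLockfile(base string, conf *cfg.Config) (*cfg.Lockfile, error) {
	yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
	if err != nil {
		return nil, err
	}
	lock, err := cfg.LockfileFromYaml(yml)
	if err != nil {
		return nil, err
	}

	// Warn, but do not fail, when glide.yaml has changed since the lock was written.
	hash, err := conf.Hash()
	if err != nil {
		return nil, err
	}
	if hash != lock.Hash {
		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
	}

	return lock, nil
}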
// Remove removes a dependency from the configuration.
func Remove(packages []string, inst *repo.Installer) {
	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	msg.Info("Preparing to remove %d packages.", len(packages))
	conf.Imports = rmDeps(packages, conf.Imports)
	conf.DevImports = rmDeps(packages, conf.DevImports)

	// Copy used to generate locks.
	confcopy := conf.Clone()

	confcopy.Imports = inst.List(confcopy)

	if err := repo.SetReference(confcopy, inst.ResolveTest); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	// TODO: Right now, there is no flag to enable this, so this will never be
	// run. I am not sure whether we should allow this in a rm op or not.
	if inst.UpdateVendored {
		repo.VendoredCleanup(confcopy)
	}

	// Write glide.yaml
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	// Write glide lock
	writeLock(conf, confcopy, base)
}
// Remove removes a dependency from the configuration.
func Remove(packages []string, inst *repo.Installer) {
	cache.SystemLock()

	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	msg.Info("Preparing to remove %d packages.", len(packages))
	conf.Imports = rmDeps(packages, conf.Imports)
	conf.DevImports = rmDeps(packages, conf.DevImports)

	// Copy used to generate locks.
	confcopy := conf.Clone()
	//confcopy.Imports = inst.List(confcopy)

	if err := repo.SetReference(confcopy, inst.ResolveTest); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	err = inst.Export(confcopy)
	if err != nil {
		msg.Die("Unable to export dependencies to vendor directory: %s", err)
	}

	// Write glide.yaml
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	// Write glide lock
	writeLock(conf, confcopy, base)
}
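// rmDeps is called by both Remove revisions above but not defined in this
// section. A hypothetical sketch, assuming conf.Imports and conf.DevImports
// are slices of *cfg.Dependency keyed by Name: it returns the dependency list
// with every named package filtered out.
func rmDeps(pkgs []string, deps []*cfg.Dependency) []*cfg.Dependency {
	res := []*cfg.Dependency{}
	for _, d := range deps {
		keep := true
		for _, p := range pkgs {
			if p == d.Name {
				keep = false
				break
			}
		}
		if keep {
			res = append(res, d)
		}
	}
	return res
}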
// Update updates repos and the lock file from the main glide yaml.
func Update(installer *repo.Installer, skipRecursive bool) {
	base := "."
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Delete unused packages
	if installer.DeleteUnused {
		dependency.DeleteUnused(conf)
	}

	// Try to check out the initial dependencies.
	if err := installer.Checkout(conf, false); err != nil {
		msg.Die("Failed to do initial checkout of config: %s", err)
	}

	// Set the versions for the initial dependencies so that resolved dependencies
	// are rooted in the correct version of the base.
	if err := repo.SetReference(conf); err != nil {
		msg.Die("Failed to set initial config references: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		err := installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}

		// TODO: There is no support here for importing Godeps, GPM, and GB files.
		// I think that all we really need to do now is hunt for these files, and then
		// roll their version numbers into the config file.

		// Set references. There may be no remaining references to set since the
		// installer set them as it went to make sure it parsed the right imports
		// from the right version of the package.
		msg.Info("Setting references for remaining imports")
		if err := repo.SetReference(confcopy); err != nil {
			msg.Error("Failed to set references: %s (Skip to cleanup)", err)
		}
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	if installer.UpdateVendored {
		repo.VendoredCleanup(confcopy)
	}

	// Write glide.yaml (Why? Godeps/GPM/GB?)
	// I think we don't need to write a new Glide file because update should not
	// change anything important. It will just generate information about
	// transitive dependencies, all of which belongs exclusively in the lock
	// file, not the glide.yaml file.

	// TODO(mattfarina): Detect when a new dependency has been added or removed
	// from the project. A removed dependency should warn and an added dependency
	// should be added to the glide.yaml file. See issue #193.

	if !skipRecursive {
		// Write lock
		hash, err := conf.Hash()
		if err != nil {
			msg.Die("Failed to generate config hash. Unable to generate lock file.")
		}
		lock := cfg.NewLockfile(confcopy.Imports, hash)
		if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
			msg.Error("Could not write lock file to %s: %s", base, err)
			return
		}

		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}
}
// Get fetches one or more dependencies and installs them.
//
// This includes resolving dependencies and re-generating the lock file.
func Get(names []string, installer *repo.Installer, insecure, skipRecursive, strip, stripVendor, nonInteract bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := gpath.Basepath()
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()
	glidefile, err := gpath.Glide()
	if err != nil {
		msg.Die("Could not find Glide file: %s", err)
	}

	// Add the packages to the config.
	if count, err2 := addPkgsToConfig(conf, names, insecure, nonInteract); err2 != nil {
		msg.Die("Failed to get new packages: %s", err2)
	} else if count == 0 {
		msg.Warn("Nothing to do")
		return
	}

	// Fetch the new packages. Can't resolve versions via installer.Update if
	// get is called while the vendor/ directory is empty so we checkout
	// everything.
	err = installer.Checkout(conf, false)
	if err != nil {
		msg.Die("Failed to checkout packages: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		// TODO: Can we streamline this in any way? The reason that we update all
		// of the dependencies is that we need to re-negotiate versions. For example,
		// if an existing dependency has the constraint >1.0 and this new package
		// adds the constraint <2.0, then this may re-resolve the existing dependency
		// to be between 1.0 and 2.0. But changing that dependency may then result
		// in that dependency's dependencies changing... so we sorta do the whole
		// thing to be safe.
		err = installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}
	}

	// Set Reference
	if err := repo.SetReference(confcopy); err != nil {
		msg.Err("Failed to set references: %s", err)
	}

	// VendoredCleanup
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(confcopy)
	}

	// Write YAML
	if err := conf.WriteFile(glidefile); err != nil {
		msg.Die("Failed to write glide YAML file: %s", err)
	}

	if !skipRecursive {
		// Write lock
		if stripVendor {
			confcopy = godep.RemoveGodepSubpackages(confcopy)
		}
		writeLock(conf, confcopy, base)
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
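// addPkgsToConfig is called by the Get revisions in this section (with
// slightly different signatures across revisions) but is not shown here. A
// simplified, hypothetical sketch following the later four-argument form used
// directly above: it records each requested package in conf.Imports, skips
// names already present, and reports how many were added. The "name#version"
// split mirrors the `glide get pkg#version` syntax; the real helper also
// resolves package roots and subpackages and can prompt the user, so insecure
// and nonInteract are ignored here. Assumes the standard library "strings"
// package is imported.
func addPkgsToConfig(conf *cfg.Config, names []string, insecure, nonInteract bool) (int, error) {
	count := 0
	for _, name := range names {
		var ref string
		if parts := strings.SplitN(name, "#", 2); len(parts) == 2 {
			name, ref = parts[0], parts[1]
		}

		// Skip packages that are already listed in glide.yaml.
		exists := false
		for _, d := range conf.Imports {
			if d.Name == name {
				exists = true
				break
			}
		}
		if exists {
			msg.Warn("Package %q is already listed in glide.yaml. Skipping", name)
			continue
		}

		conf.Imports = append(conf.Imports, &cfg.Dependency{Name: name, Reference: ref})
		count++
	}
	return count, nil
}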
// Install installs a vendor directory based on an existing Glide configuration.
func Install(installer *repo.Installer, strip, stripVendor bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := "."
	// Ensure GOPATH
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Lockfile exists
	if !gpath.HasLock(base) {
		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
		Update(installer, false, strip, stripVendor)
		return
	}

	// Load lockfile
	lock, err := LoadLockfile(base, conf)
	if err != nil {
		msg.Die("Could not load lockfile.")
	}

	// Delete unused packages
	if installer.DeleteUnused {
		// It's unclear whether this should operate off of the lock, or off
		// of the glide.yaml file. I'd think that doing this based on the
		// lock would be much more reliable.
		dependency.DeleteUnused(conf)
	}

	// Install
	newConf, err := installer.Install(lock, conf)
	if err != nil {
		msg.Die("Failed to install: %s", err)
	}

	msg.Info("Setting references.")

	// Set reference
	if err := repo.SetReference(newConf); err != nil {
		msg.Err("Failed to set references: %s (Skip to cleanup)", err)
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(newConf)
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
// Update updates repos and the lock file from the main glide yaml.
func Update(installer *repo.Installer, skipRecursive, strip, stripVendor bool) {
	if installer.UseCache {
		cache.SystemLock()
	}

	base := "."
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Delete unused packages
	if installer.DeleteUnused {
		dependency.DeleteUnused(conf)
	}

	// Try to check out the initial dependencies.
	if err := installer.Checkout(conf, false); err != nil {
		msg.Die("Failed to do initial checkout of config: %s", err)
	}

	// Set the versions for the initial dependencies so that resolved dependencies
	// are rooted in the correct version of the base.
	if err := repo.SetReference(conf); err != nil {
		msg.Die("Failed to set initial config references: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		err := installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}

		// TODO: There is no support here for importing Godeps, GPM, and GB files.
		// I think that all we really need to do now is hunt for these files, and then
		// roll their version numbers into the config file.

		// Set references. There may be no remaining references to set since the
		// installer set them as it went to make sure it parsed the right imports
		// from the right version of the package.
		msg.Info("Setting references for remaining imports")
		if err := repo.SetReference(confcopy); err != nil {
			msg.Err("Failed to set references: %s (Skip to cleanup)", err)
		}
	}

	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
	// When stripping VCS happens this will happen as well. No need for double
	// effort.
	if installer.UpdateVendored && !strip {
		repo.VendoredCleanup(confcopy)
	}

	// Write glide.yaml (Why? Godeps/GPM/GB?)
	// I think we don't need to write a new Glide file because update should not
	// change anything important. It will just generate information about
	// transitive dependencies, all of which belongs exclusively in the lock
	// file, not the glide.yaml file.

	// TODO(mattfarina): Detect when a new dependency has been added or removed
	// from the project. A removed dependency should warn and an added dependency
	// should be added to the glide.yaml file. See issue #193.

	if stripVendor {
		confcopy = godep.RemoveGodepSubpackages(confcopy)
	}

	if !skipRecursive {
		// Write lock
		hash, err := conf.Hash()
		if err != nil {
			msg.Die("Failed to generate config hash. Unable to generate lock file.")
		}
		lock := cfg.NewLockfile(confcopy.Imports, hash)

		// Only rewrite glide.lock if the new lockfile differs from the one on disk.
		wl := true
		if gpath.HasLock(base) {
			yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
			if err == nil {
				l2, err := cfg.LockfileFromYaml(yml)
				if err == nil {
					f1, err := l2.Fingerprint()
					f2, err2 := lock.Fingerprint()
					if err == nil && err2 == nil && f1 == f2 {
						wl = false
					}
				}
			}
		}
		if wl {
			if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
				msg.Err("Could not write lock file to %s: %s", base, err)
				return
			}
		} else {
			msg.Info("Versions did not change. Skipping glide.lock update.")
		}

		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}

	if strip {
		msg.Info("Removing version control data from vendor directory...")
		gpath.StripVcs()
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}
// Update updates repos and the lock file from the main glide yaml.
func Update(installer *repo.Installer, skipRecursive, stripVendor bool) {
	cache.SystemLock()

	base := "."
	EnsureGopath()
	EnsureVendorDir()
	conf := EnsureConfig()

	// Try to check out the initial dependencies.
	if err := installer.Checkout(conf); err != nil {
		msg.Die("Failed to do initial checkout of config: %s", err)
	}

	// Set the versions for the initial dependencies so that resolved dependencies
	// are rooted in the correct version of the base.
	if err := repo.SetReference(conf, installer.ResolveTest); err != nil {
		msg.Die("Failed to set initial config references: %s", err)
	}

	// Prior to resolving dependencies we need to start working with a clone
	// of the conf because we'll be making real changes to it.
	confcopy := conf.Clone()

	if !skipRecursive {
		// Get all repos and update them.
		err := installer.Update(confcopy)
		if err != nil {
			msg.Die("Could not update packages: %s", err)
		}

		// Set references. There may be no remaining references to set since the
		// installer set them as it went to make sure it parsed the right imports
		// from the right version of the package.
		msg.Info("Setting references for remaining imports")
		if err := repo.SetReference(confcopy, installer.ResolveTest); err != nil {
			msg.Err("Failed to set references: %s (Skip to cleanup)", err)
		}
	}

	err := installer.Export(confcopy)
	if err != nil {
		msg.Die("Unable to export dependencies to vendor directory: %s", err)
	}

	// Write glide.yaml (Why? Godeps/GPM/GB?)
	// I think we don't need to write a new Glide file because update should not
	// change anything important. It will just generate information about
	// transitive dependencies, all of which belongs exclusively in the lock
	// file, not the glide.yaml file.

	// TODO(mattfarina): Detect when a new dependency has been added or removed
	// from the project. A removed dependency should warn and an added dependency
	// should be added to the glide.yaml file. See issue #193.

	if !skipRecursive {
		// Write lock
		hash, err := conf.Hash()
		if err != nil {
			msg.Die("Failed to generate config hash. Unable to generate lock file.")
		}
		lock, err := cfg.NewLockfile(confcopy.Imports, confcopy.DevImports, hash)
		if err != nil {
			msg.Die("Failed to generate lock file: %s", err)
		}

		// Only rewrite glide.lock if the new lockfile differs from the one on disk.
		wl := true
		if gpath.HasLock(base) {
			yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
			if err == nil {
				l2, err := cfg.LockfileFromYaml(yml)
				if err == nil {
					f1, err := l2.Fingerprint()
					f2, err2 := lock.Fingerprint()
					if err == nil && err2 == nil && f1 == f2 {
						wl = false
					}
				}
			}
		}
		if wl {
			if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
				msg.Err("Could not write lock file to %s: %s", base, err)
				return
			}
		} else {
			msg.Info("Versions did not change. Skipping glide.lock update.")
		}

		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
	} else {
		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
	}

	if stripVendor {
		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
		err := gpath.StripVendor()
		if err != nil {
			msg.Err("Unable to strip vendor directories: %s", err)
		}
	}
}