Example #1
func cmdFrom(c *cli.Context) error {
	repos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	uniq := c.Bool("uniq")
	namespace := ""
	applyConstraints := c.Bool("apply-constraints")

	for _, repo := range repos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		for _, entry := range r.Entries() {
			if applyConstraints && r.SkipConstraints(entry) {
				continue
			}

			from, err := r.DockerFrom(&entry)
			if err != nil {
				return cli.NewMultiError(fmt.Errorf(`failed fetching/scraping FROM for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
			}

			for _, tag := range r.Tags(namespace, uniq, entry) {
				fmt.Printf("%s: %s\n", tag, from)
			}
		}
	}

	return nil
}
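The core of this example is how failures are reported: a contextual fmt.Errorf is paired with the underlying cause inside cli.NewMultiError and returned. The minimal sketch below isolates just that pattern; the github.com/urfave/cli v1 import path and the loadRepo helper are assumptions made for illustration and are not part of the example above.

package main

import (
	"fmt"

	"github.com/urfave/cli" // assumed v1 import path for the cli package used above
)

// loadRepo is a hypothetical stand-in for the repos/fetch helpers in the example.
func loadRepo(name string) error {
	return fmt.Errorf("manifest for %q not found", name)
}

func main() {
	if err := loadRepo("hello-world"); err != nil {
		// Pair a contextual error with the underlying cause, as cmdFrom does.
		multi := cli.NewMultiError(fmt.Errorf("failed fetching repo %q", "hello-world"), err)
		fmt.Println(multi.Error())
	}
}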
Example #2
func cmdList(c *cli.Context) error {
	repos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	uniq := c.Bool("uniq")
	namespace := ""
	applyConstraints := c.Bool("apply-constraints")
	onlyRepos := c.Bool("repos")

	buildOrder := c.Bool("build-order")
	if buildOrder {
		repos, err = sortRepos(repos, applyConstraints)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed sorting repo list`), err)
		}
	}

	for _, repo := range repos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		if onlyRepos {
			if r.TagEntry == nil {
				fmt.Printf("%s\n", r.RepoName)
			} else {
				for _, tag := range r.Tags(namespace, uniq, *r.TagEntry) {
					fmt.Printf("%s\n", tag)
				}
			}
			continue
		}

		var entries []manifest.Manifest2822Entry
		if buildOrder {
			entries, err = r.SortedEntries(applyConstraints)
			if err != nil {
				return cli.NewMultiError(fmt.Errorf(`failed sorting entries list for %q`, repo), err)
			}
		} else {
			entries = r.Entries()
		}

		for _, entry := range entries {
			if applyConstraints && r.SkipConstraints(entry) {
				continue
			}

			for _, tag := range r.Tags(namespace, uniq, entry) {
				fmt.Printf("%s\n", tag)
			}
		}
	}

	return nil
}
Example #3
func cmdCat(c *cli.Context) error {
	repos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	format := c.String("format")
	formatFile := c.String("format-file")

	templateName := "--format"
	tmplMultiErr := fmt.Errorf(`failed parsing --format %q`, format)
	if formatFile != "" {
		b, err := ioutil.ReadFile(formatFile)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed reading --format-file %q`, formatFile), err)
		}
		templateName = formatFile
		tmplMultiErr = fmt.Errorf(`failed parsing --format-file %q`, formatFile)
		format = string(b)
	}

	var i int
	tmpl, err := template.New(templateName).Funcs(templatelib.FuncMap).Funcs(template.FuncMap{
		"i": func() int {
			return i
		},
	}).Parse(format)
	if err != nil {
		return cli.NewMultiError(tmplMultiErr, err)
	}

	var repo string
	for i, repo = range repos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		buf := &bytes.Buffer{}
		err = tmpl.ExecuteTemplate(buf, templateName, r)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed executing template`), err)
		}
		out := buf.String()
		fmt.Print(out)
		if !strings.HasSuffix(out, "\n") {
			fmt.Println()
		}
	}

	return nil
}
Example #4
func convertErrChannelToErrorSlice(errs <-chan error) []error {
	errMessages := cli.NewMultiError()
	for err := range errs {
		if err != nil && len(err.Error()) > 0 {
			errMessages.Errors = append(errMessages.Errors, err)
		}
	}
	return errMessages.Errors
}
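One way to exercise this helper is with a buffered channel that is closed after writing, since the range loop only ends when the channel is closed and nil errors are filtered out. A minimal usage sketch, assuming the github.com/urfave/cli v1 import path; the function body is repeated so the sketch compiles on its own.

package main

import (
	"errors"
	"fmt"

	"github.com/urfave/cli" // assumed v1 import path
)

// convertErrChannelToErrorSlice is repeated from the example above so this
// sketch compiles on its own.
func convertErrChannelToErrorSlice(errs <-chan error) []error {
	errMessages := cli.NewMultiError()
	for err := range errs {
		if err != nil && len(err.Error()) > 0 {
			errMessages.Errors = append(errMessages.Errors, err)
		}
	}
	return errMessages.Errors
}

func main() {
	errs := make(chan error, 3)
	errs <- errors.New("first failure")
	errs <- nil // dropped: nil errors are filtered out
	errs <- errors.New("second failure")
	close(errs) // the range loop only returns once the channel is closed

	for _, err := range convertErrChannelToErrorSlice(errs) {
		fmt.Println(err)
	}
}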
Example #5
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsUpdate(dep, i.Force, i.Updated); err != nil {
						msg.Err("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		if !c.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	wg.Wait()

	// Signal the worker goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
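This example (and the later ConcurrentUpdate, Export, and SetReference variants) uses the same accumulation idiom: the first worker error is kept as-is and every later error is folded into the previous one with cli.NewMultiError, all under a mutex so concurrent workers do not race. A minimal sketch of just that idiom follows, with a hypothetical worker function and the github.com/urfave/cli v1 import path assumed.

package main

import (
	"fmt"
	"sync"

	"github.com/urfave/cli" // assumed v1 import path
)

// worker is a hypothetical task that fails for even ids.
func worker(id int) error {
	if id%2 == 0 {
		return fmt.Errorf("task %d failed", id)
	}
	return nil
}

func main() {
	var (
		wg        sync.WaitGroup
		lock      sync.Mutex
		returnErr error
	)

	for id := 0; id < 4; id++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := worker(id); err != nil {
				// Same idiom as ConcurrentUpdate: keep the first error,
				// fold later ones into a MultiError under the lock.
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
		}(id)
	}

	wg.Wait()
	if returnErr != nil {
		fmt.Println(returnErr)
	}
}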
Example #6
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	msg.Info("Downloading dependencies. Please wait...")

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					dest := filepath.Join(i.VendorPath(), dep.Name)
					if err := VcsUpdate(dep, dest, i.Home, i.UseCache, i.UseCacheGopath, i.UseGopath, i.Force, i.UpdateVendored); err != nil {
						msg.Warn("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		wg.Add(1)
		in <- dep
	}

	wg.Wait()

	// Signal the worker goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
Example #7
func diffCommandAction(c *cli.Context) error {
	config := diff.Config{
		Certification:  c.Args().First(),
		OpencontrolDir: opencontrolDir,
	}
	inventory, errs := diff.ComputeGapAnalysis(config)
	if errs != nil && len(errs) > 0 {
		return cli.NewExitError(cli.NewMultiError(errs...).Error(), 1)
	}
	fmt.Fprintf(c.App.Writer, "\nNumber of missing controls: %d\n", len(inventory.MissingControlList))
	for _, standardAndControl := range sortmap.ByKey(inventory.MissingControlList) {
		fmt.Fprintf(c.App.Writer, "%s\n", standardAndControl.Key)
	}
	return nil
}
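Here a slice of errors is flattened into one message with cli.NewMultiError and then given a non-zero exit status via cli.NewExitError. A minimal sketch of that composition, with hypothetical error values standing in for the ComputeGapAnalysis output and the github.com/urfave/cli v1 import path assumed.

package main

import (
	"errors"
	"fmt"

	"github.com/urfave/cli" // assumed v1 import path
)

func main() {
	// Hypothetical analysis errors standing in for the ComputeGapAnalysis output.
	errs := []error{
		errors.New("missing control AC-2"),
		errors.New("missing control AU-6"),
	}
	if len(errs) > 0 {
		// Flatten the slice into one message and attach an exit status,
		// matching diffCommandAction above.
		exitErr := cli.NewExitError(cli.NewMultiError(errs...).Error(), 1)
		fmt.Println(exitErr.Error(), "(exit code:", exitErr.ExitCode(), ")")
	}
}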
Example #8
func cmdPush(c *cli.Context) error {
	repos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	uniq := c.Bool("uniq")
	namespace := c.String("namespace")

	if namespace == "" {
		return fmt.Errorf(`"--namespace" is a required flag for "push"`)
	}

	for _, repo := range repos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		for _, entry := range r.Entries() {
			if r.SkipConstraints(entry) {
				continue
			}

			for _, tag := range r.Tags(namespace, uniq, entry) {
				fmt.Printf("Pushing %s\n", tag)
				err = dockerPush(tag)
				if err != nil {
					return cli.NewMultiError(fmt.Errorf(`failed pushing %q`, tag), err)
				}
			}
		}
	}

	return nil
}
Example #9
// ConcurrentUpdate takes a list of dependencies and updates in parallel.
func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer) error {
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					if err := VcsUpdate(dep, cwd, i); err != nil {
						msg.Warn("Update failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range deps {
		wg.Add(1)
		in <- dep
	}

	wg.Wait()

	// Signal the worker goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	return returnErr
}
Example #10
func dockerBuild(tag string, context io.Reader) error {
	args := []string{"build", "-t", tag, "--rm", "--force-rm"}
	args = append(args, "-")
	cmd := exec.Command("docker", args...)
	cmd.Stdin = context
	if debugFlag {
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		fmt.Printf("$ docker %q\n", args)
		return cmd.Run()
	} else {
		buf := &bytes.Buffer{}
		cmd.Stdout = buf
		cmd.Stderr = buf
		err := cmd.Run()
		if err != nil {
			err = cli.NewMultiError(err, fmt.Errorf(`docker %q output:%s`, args, "\n"+buf.String()))
		}
		return err
	}
}
Example #11
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)

					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Signal the worker goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())

	if err != nil {
		// When there are different physical devices we cannot rename cross device.
		// Instead we copy.
		switch terr := err.(type) {
		case *os.LinkError:
			// syscall.EXDEV is the common name for the cross device link error
			// which has varying output text across different operating systems.
			if terr.Err == syscall.EXDEV {
				msg.Debug("Cross link err, trying manual copy: %s", err)
				return gpath.CopyDir(vp, i.VendorPath())
			} else if runtime.GOOS == "windows" {
				// In windows it can drop down to an operating system call that
				// returns an operating system error with a different number and
				// message. Checking for that as a fall back.
				noerr, ok := terr.Err.(syscall.Errno)
				// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
				// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
				if ok && noerr == 0x11 {
					msg.Debug("Cross link err on Windows, trying manual copy: %s", err)
					return gpath.CopyDir(vp, i.VendorPath())
				}
			}
		}
	}

	return err

}
Example #12
// NewCLIApp creates a new instances of the CLI
func NewCLIApp() *cli.App {
	app := cli.NewApp()
	app.Name = "Compliance Masonry"
	app.Usage = "Open Control CLI Tool"
	app.Version = "1.1.1"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name:  "verbose",
			Usage: "Indicates whether to run the command with verbosity.",
		},
	}
	app.Before = func(c *cli.Context) error {
		// Resets the log to output to nothing
		log.SetOutput(ioutil.Discard)
		if c.Bool("verbose") {
			log.SetOutput(os.Stderr)
			log.Println("Running with verbosity")
		}
		return nil
	}
	app.Commands = []cli.Command{
		{
			Name:    "get",
			Aliases: []string{"g"},
			Usage:   "Install compliance dependencies",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "dest",
					Value: constants.DefaultDestination,
					Usage: "Location to download the repos.",
				},
				cli.StringFlag{
					Name:  "config",
					Value: constants.DefaultConfigYaml,
					Usage: "Location of system yaml",
				},
			},
			Action: func(c *cli.Context) error {
				f := fs.OSUtil{}
				config := c.String("config")
				configBytes, err := f.OpenAndReadFile(config)
				if err != nil {
					app.Writer.Write([]byte(err.Error()))
					os.Exit(1)
				}
				wd, err := os.Getwd()
				if err != nil {
					app.Writer.Write([]byte(err.Error()))
					os.Exit(1)
				}
				destination := filepath.Join(wd, c.String("dest"))
				err = Get(destination,
					configBytes,
					&common.ConfigWorker{Downloader: common.NewVCSDownloader(), Parser: parser.Parser{}, ResourceMap: mapset.Init(), FSUtil: f})
				if err != nil {
					return cli.NewExitError(err.Error(), 1)
				}
				app.Writer.Write([]byte("Compliance Dependencies Installed"))
				return nil
			},
		},
		{
			Name:    "docs",
			Aliases: []string{"d"},
			Usage:   "Create Documentation",
			Subcommands: []cli.Command{
				{
					Name:    "gitbook",
					Aliases: []string{"g"},
					Usage:   "Create Gitbook Documentation",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name:        "opencontrols, o",
							Value:       "opencontrols",
							Usage:       "Set opencontrols directory",
							Destination: &opencontrolDir,
						},
						cli.StringFlag{
							Name:        "exports, e",
							Value:       "exports",
							Usage:       "Sets the export directory",
							Destination: &exportPath,
						},
						cli.StringFlag{
							Name:        "markdowns, m",
							Value:       "markdowns",
							Usage:       "Sets the markdowns directory",
							Destination: &markdownPath,
						},
					},
					Action: func(c *cli.Context) error {
						config := gitbook.Config{
							Certification:  c.Args().First(),
							OpencontrolDir: opencontrolDir,
							ExportPath:     exportPath,
							MarkdownPath:   markdownPath,
						}
						warning, errMessages := docs.MakeGitbook(config)
						if warning != "" {
							app.Writer.Write([]byte(warning))
						}
						if errMessages != nil && len(errMessages) > 0 {
							err := cli.NewMultiError(errMessages...)
							return cli.NewExitError(err.Error(), 1)
						} else {
							app.Writer.Write([]byte("New Gitbook Documentation Created"))
							return nil
						}
					},
				},
				{
					Name:    "docx",
					Aliases: []string{"d"},
					Usage:   "Create Docx Documentation using a Template",
					Flags: []cli.Flag{
						cli.StringFlag{
							Name:        "opencontrols, o",
							Value:       "opencontrols",
							Usage:       "Set opencontrols directory",
							Destination: &opencontrolDir,
						},
						cli.StringFlag{
							Name:        "template, t",
							Value:       "",
							Usage:       "Set template to build",
							Destination: &templatePath,
						},
						cli.StringFlag{
							Name:        "export, e",
							Value:       "export.docx",
							Usage:       "Sets the export directory",
							Destination: &exportPath,
						},
					},
					Action: func(c *cli.Context) error {
						config := docx.Config{
							OpencontrolDir: opencontrolDir,
							TemplatePath:   templatePath,
							ExportPath:     exportPath,
						}
						if err := docs.BuildTemplate(config); err != nil && len(err.Error()) > 0 {
							return cli.NewExitError(err.Error(), 1)
						} else {
							app.Writer.Write([]byte("New Docx Created"))
							return nil
						}
					},
				},
			},
		},
		diffCommand,
	}
	return app
}
Example #13
func cmdBuild(c *cli.Context) error {
	repos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	repos, err = sortRepos(repos)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed sorting repo list`), err)
	}

	uniq := c.Bool("uniq")
	namespace := c.String("namespace")
	pull := c.String("pull")
	switch pull {
	case "always", "missing", "never":
		// legit
	default:
		return fmt.Errorf(`invalid value for --pull: %q`, pull)
	}

	for _, repo := range repos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		entries, err := r.SortedEntries()
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed sorting entries list for %q`, repo), err)
		}

		for _, entry := range entries {
			if r.SkipConstraints(entry) {
				continue
			}

			from, err := r.DockerFrom(&entry)
			if err != nil {
				return cli.NewMultiError(fmt.Errorf(`failed fetching/scraping FROM for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
			}

			if from != "scratch" && pull != "never" {
				doPull := false
				switch pull {
				case "always":
					doPull = true
				case "missing":
					_, err := dockerInspect("{{.Id}}", from)
					doPull = (err != nil)
				default:
					return fmt.Errorf(`unexpected value for --pull: %s`, pull)
				}
				if doPull {
					fmt.Printf("Pulling %s (%s)\n", from, r.Identifier())
					dockerPull(from)
				}
			}

			cacheHash, err := r.dockerCacheHash(&entry)
			if err != nil {
				return cli.NewMultiError(fmt.Errorf(`failed calculating "cache hash" for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
			}

			cacheTag := "bashbrew/cache:" + cacheHash

			// check whether we've already built this artifact
			_, err = dockerInspect("{{.Id}}", cacheTag)
			if err != nil {
				fmt.Printf("Building %s (%s)\n", cacheTag, r.Identifier())

				commit, err := r.fetchGitRepo(&entry)
				if err != nil {
					return cli.NewMultiError(fmt.Errorf(`failed fetching git repo for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
				}

				archive, err := gitArchive(commit, entry.Directory)
				if err != nil {
					return cli.NewMultiError(fmt.Errorf(`failed generating git archive for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
				}
				defer archive.Close()

				err = dockerBuild(cacheTag, archive)
				if err != nil {
					return cli.NewMultiError(fmt.Errorf(`failed building %q (tags %q)`, r.RepoName, entry.TagsString()), err)
				}
			} else {
				fmt.Printf("Using %s (%s)\n", cacheTag, r.Identifier())
			}

			for _, tag := range r.Tags(namespace, uniq, entry) {
				fmt.Printf("Tagging %s\n", tag)

				err := dockerTag(cacheTag, tag)
				if err != nil {
					return cli.NewMultiError(fmt.Errorf(`failed tagging %q as %q`, cacheTag, tag), err)
				}
			}
		}
	}

	return nil
}
Example #14
// SetReference is a command to set the VCS reference (commit id, tag, etc) for
// a project.
func SetReference(conf *cfg.Config, resolveTest bool) error {

	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
		msg.Info("No references set.\n")
		return nil
	}

	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for i := 0; i < concurrentWorkers; i++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:

					var loc string
					if dep.Repository != "" {
						loc = dep.Repository
					} else {
						loc = "https://" + dep.Name
					}
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)
					if err := VcsVersion(dep); err != nil {
						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)

						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			wg.Add(1)
			in <- dep
		}
	}

	if resolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()
	// Close goroutines setting the version
	for i := 0; i < concurrentWorkers; i++ {
		done <- struct{}{}
	}
	// close(done)
	// close(in)

	return returnErr
}
Example #15
func cmdFamily(parents bool, c *cli.Context) error {
	depsRepos, err := repos(c.Bool("all"), c.Args()...)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering repo list`), err)
	}

	uniq := c.Bool("uniq")
	applyConstraints := c.Bool("apply-constraints")

	allRepos, err := repos(true)
	if err != nil {
		return cli.NewMultiError(fmt.Errorf(`failed gathering ALL repos list`), err)
	}

	// create network (all repos)
	network := topsort.NewNetwork()

	// add nodes
	for _, repo := range allRepos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		for _, entry := range r.Entries() {
			if applyConstraints && r.SkipConstraints(entry) {
				continue
			}

			for _, tag := range r.Tags("", false, entry) {
				network.AddNode(tag, entry)
			}
		}
	}

	// add edges
	for _, repo := range allRepos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}
		for _, entry := range r.Entries() {
			if applyConstraints && r.SkipConstraints(entry) {
				continue
			}

			from, err := r.DockerFrom(&entry)
			if err != nil {
				return cli.NewMultiError(fmt.Errorf(`failed fetching/scraping FROM for %q (tags %q)`, r.RepoName, entry.TagsString()), err)
			}
			for _, tag := range r.Tags("", false, entry) {
				network.AddEdge(from, tag)
			}
		}
	}

	// now the real work
	seen := map[*topsort.Node]bool{}
	for _, repo := range depsRepos {
		r, err := fetch(repo)
		if err != nil {
			return cli.NewMultiError(fmt.Errorf(`failed fetching repo %q`, repo), err)
		}

		for _, entry := range r.Entries() {
			if applyConstraints && r.SkipConstraints(entry) {
				continue
			}

			for _, tag := range r.Tags("", uniq, entry) {
				nodes := []*topsort.Node{}
				if parents {
					nodes = append(nodes, network.Get(tag).InboundEdges...)
				} else {
					nodes = append(nodes, network.Get(tag).OutboundEdges...)
				}
				for len(nodes) > 0 {
					node := nodes[0]
					nodes = nodes[1:]
					if seen[node] {
						continue
					}
					seen[node] = true
					fmt.Printf("%s\n", node.Name)
					if parents {
						nodes = append(nodes, node.InboundEdges...)
					} else {
						nodes = append(nodes, node.OutboundEdges...)
					}
				}
			}
		}
	}

	return nil
}
Example #16
// Export from the cache to the vendor directory
func (i *Installer) Export(conf *cfg.Config) error {
	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
	if err != nil {
		return err
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			msg.Err(err.Error())
		}
	}()

	vp := filepath.Join(tempDir, "vendor")
	err = os.MkdirAll(vp, 0755)

	msg.Info("Exporting resolved dependencies...")
	done := make(chan struct{}, concurrentWorkers)
	in := make(chan *cfg.Dependency, concurrentWorkers)
	var wg sync.WaitGroup
	var lock sync.Mutex
	var returnErr error

	for ii := 0; ii < concurrentWorkers; ii++ {
		go func(ch <-chan *cfg.Dependency) {
			for {
				select {
				case dep := <-ch:
					loc := dep.Remote()
					key, err := cache.Key(loc)
					if err != nil {
						msg.Die(err.Error())
					}
					cache.Lock(key)

					cdir := filepath.Join(cache.Location(), "src", key)
					repo, err := dep.GetRepo(cdir)
					if err != nil {
						msg.Die(err.Error())
					}
					msg.Info("--> Exporting %s", dep.Name)
					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
						msg.Err("Export failed for %s: %s\n", dep.Name, err)
						// Capture the error while making sure the concurrent
						// operations don't step on each other.
						lock.Lock()
						if returnErr == nil {
							returnErr = err
						} else {
							returnErr = cli.NewMultiError(returnErr, err)
						}
						lock.Unlock()
					}
					cache.Unlock(key)
					wg.Done()
				case <-done:
					return
				}
			}
		}(in)
	}

	for _, dep := range conf.Imports {
		if !conf.HasIgnore(dep.Name) {
			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
			if err != nil {
				lock.Lock()
				if returnErr == nil {
					returnErr = err
				} else {
					returnErr = cli.NewMultiError(returnErr, err)
				}
				lock.Unlock()
			}
			wg.Add(1)
			in <- dep
		}
	}

	if i.ResolveTest {
		for _, dep := range conf.DevImports {
			if !conf.HasIgnore(dep.Name) {
				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
				if err != nil {
					lock.Lock()
					if returnErr == nil {
						returnErr = err
					} else {
						returnErr = cli.NewMultiError(returnErr, err)
					}
					lock.Unlock()
				}
				wg.Add(1)
				in <- dep
			}
		}
	}

	wg.Wait()

	// Signal the worker goroutines to stop
	for ii := 0; ii < concurrentWorkers; ii++ {
		done <- struct{}{}
	}

	if returnErr != nil {
		return returnErr
	}

	msg.Info("Replacing existing vendor dependencies")
	err = os.RemoveAll(i.VendorPath())
	if err != nil {
		return err
	}

	err = os.Rename(vp, i.VendorPath())

	// When there are different physical devices we cannot rename cross device.
	// Fall back to manual copy.
	if err != nil && strings.Contains(err.Error(), "cross-device link") {
		msg.Debug("Cross link err, trying manual copy: %s", err)

		err = gpath.CopyDir(vp, i.VendorPath())
	}

	return err

}