Example 1
func (c *APIUnitsCmd) Execute(args []string) error {
	context, err := prepareCommandContext(c.Args.Dir.String())
	if err != nil {
		return err
	}

	var unitSlice []unit.SourceUnit
	unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
	foundUnit := false
	w := fs.WalkFS(".", context.commitFS)
	for w.Step() {
		unitFile := w.Path()
		if strings.HasSuffix(unitFile, unitSuffix) {
			var unit unit.SourceUnit
			foundUnit = true
			f, err := context.commitFS.Open(unitFile)
			if err != nil {
				return err
			}
			defer f.Close()
			if err := json.NewDecoder(f).Decode(&unit); err != nil {
				return fmt.Errorf("%s: %s", unitFile, err)
			}
			unitSlice = append(unitSlice, unit)
		}
	}

	if !foundUnit {
		return errors.New("No source units found. Try running `src config` first.")
	}

	return json.NewEncoder(os.Stdout).Encode(unitSlice)
}
Example 2
// ListRepoPaths implements RepoPaths.
func (defaultRepoPaths) ListRepoPaths(vfs rwvfs.WalkableFileSystem, after string, max int) ([][]string, error) {
	var paths [][]string
	w := fs.WalkFS(".", rwvfs.Walkable(vfs))
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		fi := w.Stat()
		if w.Path() >= after && fi.Mode().IsDir() {
			if fi.Name() == SrclibStoreDir {
				w.SkipDir()
				paths = append(paths, strings.Split(filepath.ToSlash(w.Path()), "/"))
				if max != 0 && len(paths) >= max {
					break
				}
				continue
			}
			if fi.Name() != "." && strings.HasPrefix(fi.Name(), ".") {
				w.SkipDir()
				continue
			}
		}
	}
	return paths, nil
}
Example 3
File: store.go Project: vkz/srclib
// RemoveAll removes a tree recursively.
func RemoveAll(path string, vfs rwvfs.WalkableFileSystem) error {
	w := fs.WalkFS(path, vfs)

	remove := func(par *parallel.Run, path string) {
		par.Do(func() error { return vfs.Remove(path) })
	}

	var dirs []string // remove dirs after removing all files
	filesPar := parallel.NewRun(20)
	for w.Step() {
		if err := w.Err(); err != nil {
			return err
		}
		if w.Stat().IsDir() {
			dirs = append(dirs, w.Path())
		} else {
			remove(filesPar, w.Path())
		}
	}

	if err := filesPar.Wait(); err != nil {
		return err
	}

	dirsPar := parallel.NewRun(20)
	sort.Sort(sort.Reverse(sort.StringSlice(dirs))) // reverse so we delete leaf dirs first
	for _, dir := range dirs {
		remove(dirsPar, dir)
	}
	return dirsPar.Wait()
}
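A minimal usage sketch for RemoveAll, assuming rwvfs also provides an OS-backed constructor (rwvfs.OS) next to the rwvfs.Walkable wrapper used throughout these examples; the directory names are illustrative.

// removeScratch is a hedged sketch: wipe a scratch tree through an OS-backed
// rwvfs. rwvfs.OS is assumed; rwvfs.Walkable matches the examples above.
func removeScratch() error {
	osFS := rwvfs.OS("tmp")                              // assumed constructor; "tmp" is illustrative
	return RemoveAll("build-data", rwvfs.Walkable(osFS)) // removes tmp/build-data recursively
}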
Example 4
// ListRepoPaths implements RepoPaths.
func (defaultRepoPaths) ListRepoPaths(vfs rwvfs.WalkableFileSystem, after string, max int) ([][]string, error) {
	var paths [][]string
	w := fs.WalkFS(".", rwvfs.Walkable(vfs))
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		fi := w.Stat()
		if w.Path() >= after && fi.Mode().IsDir() {
			if fi.Name() == SrclibStoreDir {
				w.SkipDir()
				// NOTE: This assumes that the vfs's path
				// separator is "/", which is not true in general.
				paths = append(paths, strings.Split(w.Path(), "/"))
				if max != 0 && len(paths) >= max {
					break
				}
				continue
			}
			if fi.Name() != "." && strings.HasPrefix(fi.Name(), ".") {
				w.SkipDir()
				continue
			}
		}
	}
	return paths, nil
}
Example 5
func (c *BuildDataFetchCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}

	remoteFS, remoteRepoLabel, repoRevSpec, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}

	// Use uncached API client because the .srclib-cache already
	// caches it, and we want to be able to stream large files.
	//
	// TODO(sqs): this uncached client isn't authed because it doesn't
	// have the other API client's http.Client or http.RoundTripper
	cl := newAPIClientWithAuth(false)
	remoteFS, err = cl.BuildData.FileSystem(repoRevSpec)
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Fetching remote build files for %s to %s...", remoteRepoLabel, localRepoLabel)
	}

	// TODO(sqs): check if file exists in local cache and don't fetch it if it does and if it is identical

	par := parallel.NewRun(8)
	w := fs.WalkFS(".", rwvfs.Walkable(remoteFS))
	for w.Step() {
		path := w.Path()
		if err := w.Err(); err != nil {
			if path == "." {
				log.Printf("# No build data to pull from %s", remoteRepoLabel)
				return nil
			}
			return fmt.Errorf("walking remote dir tree: %s", err)
		}
		fi := w.Stat()
		if fi == nil {
			continue
		}
		if !fi.Mode().IsRegular() {
			continue
		}
		par.Do(func() error {
			return fetchFile(remoteFS, localFS, path, fi, c.DryRun)
		})
	}
	if err := par.Wait(); err != nil {
		return fmt.Errorf("error fetching: %s", err)
	}
	return nil
}
Example 6
// StatAllRecursive recursively stats all files and dirs in fs,
// starting at path and descending. The Name method of each returned
// FileInfo reports its full path, not just its filename.
func StatAllRecursive(path string, wfs WalkableFileSystem) ([]os.FileInfo, error) {
	var fis []os.FileInfo
	w := fs.WalkFS(path, wfs)
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		fis = append(fis, treeFileInfo{w.Path(), w.Stat()})
	}
	return fis, nil
}
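A short usage sketch for StatAllRecursive, built on the rwvfs.Map and rwvfs.Walkable helpers that appear in the test examples below, and assuming the function is exported from that same rwvfs package (its unqualified WalkableFileSystem parameter suggests it is); the map contents are illustrative.

// listAll is a hedged sketch: stat every entry of an in-memory filesystem and
// print the full paths promised by the doc comment above.
func listAll() error {
	mapFS := rwvfs.Map(map[string]string{"a": "1", "b/c": "2"})
	fis, err := rwvfs.StatAllRecursive(".", rwvfs.Walkable(mapFS))
	if err != nil {
		return err
	}
	for _, fi := range fis {
		fmt.Println(fi.Name()) // e.g. ".", "a", "b", "b/c"
	}
	return nil
}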
Example 7
func getSourceUnits(commitFS rwvfs.WalkableFileSystem, repo *Repo) []string {
	var unitFiles []string
	unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
	w := fs.WalkFS(".", commitFS)
	for w.Step() {
		if strings.HasSuffix(w.Path(), unitSuffix) {
			unitFiles = append(unitFiles, w.Path())
		}
	}
	return unitFiles
}
Example 8
// Glob returns the names of all files under prefix matching pattern or nil if
// there is no matching file. The syntax of patterns is the same as in
// path/filepath.Match.
func Glob(wfs WalkableFileSystem, prefix, pattern string) (matches []string, err error) {
	walker := fs.WalkFS(filepath.Clean(prefix), wfs)
	for walker.Step() {
		path := walker.Path()
		matched, err := filepath.Match(pattern, path)
		if err != nil {
			return nil, err
		}
		if matched {
			matches = append(matches, path)
		}
	}
	return
}
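A hedged usage sketch for Glob, again assuming it ships in the rwvfs package alongside Map and Walkable; the file names and pattern are illustrative. Note that filepath.Match wildcards do not cross "/", so the prefix is repeated in the pattern.

// globUnits is a hedged sketch: list *.unit.json files under a prefix in an
// in-memory filesystem.
func globUnits() ([]string, error) {
	mapFS := rwvfs.Map(map[string]string{
		"u/a.unit.json": "{}",
		"u/b.txt":       "x",
	})
	return rwvfs.Glob(rwvfs.Walkable(mapFS), "u", "u/*.unit.json") // -> ["u/a.unit.json"]
}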
Example 9
func (s *fsTreeStore) unitFilenames() ([]string, error) {
	var files []string
	w := fs.WalkFS(".", rwvfs.Walkable(s.fs))
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		fi := w.Stat()
		if fi.Mode().IsRegular() && strings.HasSuffix(fi.Name(), unitFileSuffix) {
			files = append(files, filepath.ToSlash(w.Path()))
		}
	}
	return files, nil
}
Example 10
func (r *FileRepo) Walk(walkFn func(file string)) error {
	walker := fs.WalkFS(".", r.fs)
	for walker.Step() {
		if err := walker.Err(); err != nil {
			return err
		}
		path := walker.Path()
		if walker.Stat().IsDir() || filepath.Ext(path) != fileExtension {
			continue
		}
		walkFn(path[:len(path)-len(fileExtension)])
	}
	return nil
}
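A small usage sketch for the callback-style Walk above; it assumes only a *FileRepo constructed elsewhere.

// printRepoFiles is a hedged sketch: print every entry name, which Walk
// delivers with fileExtension already stripped.
func printRepoFiles(r *FileRepo) error {
	return r.Walk(func(file string) {
		fmt.Println(file)
	})
}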
Example 11
File: cached.go Project: xuy/srclib
// ReadCached reads a Tree's configuration from all of its source unit
// definition files (which may either be in a local VFS rooted at a
// .srclib-cache/<COMMITID> dir, or a remote VFS). It does not read
// the Srcfile; the Srcfile's directives are already accounted for in
// the cached source unit definition files.
//
// bdfs should be a VFS obtained from a call to
// (buildstore.RepoBuildStore).Commit.
func ReadCached(bdfs vfs.FileSystem) (*Tree, error) {
	if _, err := bdfs.Lstat("."); os.IsNotExist(err) {
		return nil, fmt.Errorf("build cache dir does not exist (did you run `srclib config` to create it)?")
	} else if err != nil {
		return nil, err
	}

	// Collect all **/*.unit.json files.
	var unitFiles []string
	unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
	w := fs.WalkFS(".", rwvfs.Walkable(rwvfs.ReadOnly(bdfs)))
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		if path := w.Path(); strings.HasSuffix(path, unitSuffix) {
			unitFiles = append(unitFiles, path)
		}
	}

	// Parse units
	sort.Strings(unitFiles)
	units := make([]*unit.SourceUnit, len(unitFiles))
	par := parallel.NewRun(runtime.GOMAXPROCS(0))
	for i_, unitFile_ := range unitFiles {
		i, unitFile := i_, unitFile_
		par.Acquire()
		go func() {
			defer par.Release()
			f, err := bdfs.Open(unitFile)
			if err != nil {
				par.Error(err)
				return
			}
			if err := json.NewDecoder(f).Decode(&units[i]); err != nil {
				f.Close()
				par.Error(err)
				return
			}
			if err := f.Close(); err != nil {
				par.Error(err)
				return
			}
		}()
	}
	if err := par.Wait(); err != nil {
		return nil, err
	}
	return &Tree{SourceUnits: units}, nil
}
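For context, a hedged sketch of calling ReadCached with a commit-scoped filesystem obtained the way example 15 below obtains one; it assumes the sketch compiles in the same package as ReadCached and that buildStore.Commit takes a commit ID string, as in that example.

// readTree is a hedged sketch: open the build-data VFS for one commit and
// decode all cached source units from it. buildstore.RepoBuildStore and the
// Commit call mirror example 15.
func readTree(buildStore buildstore.RepoBuildStore, commitID string) (*Tree, error) {
	return ReadCached(buildStore.Commit(commitID))
}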
Example 12
func (c *APIDepsCmd) Execute(args []string) error {
	// HACK(samertm): append a backslash to Dir to assure that it's parsed
	// as a directory, but Directory should have an unmarshalling
	// method that does this.
	context, err := prepareCommandContext(c.Args.Dir.String())
	if err != nil {
		return err
	}

	var depSlice []*dep.Resolution
	// TODO: Make DataTypeSuffix work with type of depSlice
	depSuffix := buildstore.DataTypeSuffix([]*dep.ResolvedDep{})
	depCache := make(map[string]struct{})
	foundDepresolve := false
	w := fs.WalkFS(".", context.commitFS)
	for w.Step() {
		depfile := w.Path()
		if strings.HasSuffix(depfile, depSuffix) {
			foundDepresolve = true
			var deps []*dep.Resolution
			f, err := context.commitFS.Open(depfile)
			if err != nil {
				return err
			}
			defer f.Close()
			if err := json.NewDecoder(f).Decode(&deps); err != nil {
				return fmt.Errorf("%s: %s", depfile, err)
			}
			for _, d := range deps {
				key, err := d.RawKeyId()
				if err != nil {
					return err
				}
				if _, ok := depCache[key]; !ok {
					depCache[key] = struct{}{}
					depSlice = append(depSlice, d)
				}
			}
		}
	}

	if !foundDepresolve {
		return errors.New("No dependency information found. Try running `src config` first.")
	}

	return json.NewEncoder(os.Stdout).Encode(depSlice)
}
Example 13
func TestMap_Walk2(t *testing.T) {
	m := map[string]string{"a/b/c/d": "a"}
	mapFS := Map(m)

	var names []string
	w := fs.WalkFS(".", Walkable(Sub(mapFS, "a/b")))
	for w.Step() {
		if err := w.Err(); err != nil {
			t.Fatalf("walk path %q: %s", w.Path(), err)
		}
		names = append(names, w.Path())
	}

	wantNames := []string{".", "c", "c/d"}
	sort.Strings(names)
	sort.Strings(wantNames)
	if !reflect.DeepEqual(names, wantNames) {
		t.Errorf("got entry names %v, want %v", names, wantNames)
	}
}
Example 14
func TestMap_Walk(t *testing.T) {
	m := map[string]string{"a": "a", "b/c": "c", "b/x/y/z": "z"}
	mapFS := rwvfs.Map(m)

	var names []string
	w := fs.WalkFS(".", rwvfs.Walkable(mapFS))
	for w.Step() {
		if err := w.Err(); err != nil {
			t.Fatalf("walk path %q: %s", w.Path(), err)
		}
		names = append(names, w.Path())
	}

	wantNames := []string{".", "a", "b", "b/c", "b/x", "b/x/y", "b/x/y/z"}
	sort.Strings(names)
	sort.Strings(wantNames)
	if !reflect.DeepEqual(names, wantNames) {
		t.Errorf("got entry names %v, want %v", names, wantNames)
	}
}
Example 15
// getSourceUnitsWithFile gets a list of all source units that contain
// the given file.
func getSourceUnitsWithFile(buildStore buildstore.RepoBuildStore, repo *Repo, filename string) ([]*unit.SourceUnit, error) {
	filename = filepath.Clean(filename)

	// TODO(sqs): This whole lookup is totally inefficient. The storage format
	// is not optimized for lookups.
	commitFS := buildStore.Commit(repo.CommitID)

	// Find all source unit definition files. (Also calling getSourceUnits
	// here would repeat the same walk and add every unit file twice.)
	var unitFiles []string
	unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
	w := fs.WalkFS(".", commitFS)
	for w.Step() {
		if strings.HasSuffix(w.Path(), unitSuffix) {
			unitFiles = append(unitFiles, w.Path())
		}
	}

	// Find which source units the file belongs to.
	var units []*unit.SourceUnit
	for _, unitFile := range unitFiles {
		var u *unit.SourceUnit
		f, err := commitFS.Open(unitFile)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		if err := json.NewDecoder(f).Decode(&u); err != nil {
			return nil, fmt.Errorf("%s: %s", unitFile, err)
		}
		for _, f2 := range u.Files {
			if filepath.Clean(f2) == filename {
				units = append(units, u)
				break
			}
		}
	}

	return units, nil
}
Example 16
func (r *FormRepo) List() ([]Form, error) {
	forms := make([]Form, 0)
	walker := fs.WalkFS(".", r.fs)
	for walker.Step() {
		if err := walker.Err(); err != nil {
			return nil, err
		}
		if !walker.Stat().IsDir() {
			continue
		}
		form, err := r.Fields(walker.Path())
		switch err {
		case ErrNotFound: // Ignore
		case nil:
			if len(form.Fields) > 0 {
				forms = append(forms, *form)
			}
		default:
			return nil, err
		}
	}
	return forms, nil
}
Example 17
func (c *BuildDataUploadCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}

	remoteFS, remoteRepoLabel, _, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Uploading build files from %s to %s...", localRepoLabel, remoteRepoLabel)
	}

	// TODO(sqs): check if file exists remotely and don't upload it if it does and if it is identical

	par := parallel.NewRun(8)
	w := fs.WalkFS(".", rwvfs.Walkable(localFS))
	for w.Step() {
		if err := w.Err(); err != nil {
			return err
		}
		fi := w.Stat()
		if fi == nil {
			continue
		}
		if !fi.Mode().IsRegular() {
			continue
		}
		path := w.Path()
		par.Do(func() error {
			return uploadFile(localFS, remoteFS, path, fi, c.DryRun)
		})
	}
	return par.Wait()
}
Example 18
func (c *BuildDataListCmd) Execute(args []string) error {
	if c.URLs && c.Local {
		return fmt.Errorf("using --urls is incompatible with the build-data -l/--local option because local build data files do not have a URL")
	}
	if c.URLs {
		c.Long = true
	}
	dir := c.Args.Dir
	if dir == "" {
		dir = "."
	}

	bdfs, repoLabel, err := c.getFileSystem()
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Listing build files for %s in dir %q", repoLabel, dir)
	}

	// Only used for constructing the URLs for remote build data.
	var repoRevSpec sourcegraph.RepoRevSpec
	if !c.Local {
		cl := NewAPIClientWithAuthIfPresent()
		rrepo, err := getRemoteRepo(cl)
		if err != nil {
			return err
		}
		repoRevSpec.RepoSpec = rrepo.RepoSpec()

		lrepo, err := openLocalRepo()
		if err != nil {
			return err
		}
		repoRevSpec.Rev = lrepo.CommitID
		repoRevSpec.CommitID = lrepo.CommitID
	}

	printFile := func(fi os.FileInfo) {
		if c.Type == "f" && !fi.Mode().IsRegular() {
			return
		}
		if c.Type == "d" && !fi.Mode().IsDir() {
			return
		}

		var suffix string
		if fi.IsDir() {
			suffix = string(filepath.Separator)
		}

		var urlStr string
		if c.URLs {
			spec := sourcegraph.BuildDataFileSpec{RepoRev: repoRevSpec, Path: filepath.Join(dir, fi.Name())}

			// TODO(sqs): use sourcegraph.Router when it is merged to go-sourcegraph master
			u, err := router.NewAPIRouter(nil).Get(router.RepoBuildDataEntry).URLPath(router.MapToArray(spec.RouteVars())...)
			if err != nil {
				log.Fatal(err)
			}

			// Strip leading "/" so that the URL is relative to the
			// endpoint URL even if the endpoint URL contains a path.
			urlStr = getEndpointURL().ResolveReference(&url.URL{Path: u.Path[1:]}).String()
		}

		if c.Long {
			var timeStr string
			if !fi.ModTime().IsZero() {
				timeStr = fi.ModTime().Format("Jan _2 15:04")
			}
			fmt.Printf("% 7d %12s %s%s %s\n", fi.Size(), timeStr, fi.Name(), suffix, urlStr)
		} else {
			fmt.Println(fi.Name() + suffix)
		}
	}

	var fis []os.FileInfo
	if c.Recursive {
		w := fs.WalkFS(dir, rwvfs.Walkable(bdfs))
		for w.Step() {
			if err := w.Err(); err != nil {
				return err
			}
			printFile(treeFileInfo{w.Path(), w.Stat()})
		}
	} else {
		fis, err = bdfs.ReadDir(dir)
		if err != nil {
			return err
		}
		for _, fi := range fis {
			printFile(fi)
		}
	}

	return nil
}
Example 19
// Walk returns a new Walker rooted at root.
func (c *Client) Walk(root string) *fs.Walker {
	return fs.WalkFS(root, c)
}
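A hedged usage sketch for the Walk method above, driving the returned *fs.Walker with the same Step/Err/Path loop used throughout this listing; the client value and root path are hypothetical.

// walkRemote is a hedged sketch: print every path reachable from root.
func walkRemote(c *Client) {
	w := c.Walk("/")
	for w.Step() {
		if err := w.Err(); err != nil {
			log.Println(err)
			continue
		}
		log.Println(w.Path())
	}
}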
Example 20
func (c *BuildDataFetchCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}

	remoteFS, remoteRepoLabel, repoRevSpec, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}

	// Use uncached API client because the .srclib-cache already
	// caches it, and we want to be able to stream large files.
	//
	// TODO(sqs): this uncached client isn't authed because it doesn't
	// have the other API client's http.Client or http.RoundTripper
	cl := newAPIClientWithAuth(false)
	if c.Latest {
		repoBuildInfo, _, err := cl.Repos.GetBuild(repoRevSpec, &sourcegraph.RepoGetBuildOptions{Exact: false})
		if err != nil {
			return err
		}
		if repoBuildInfo.LastSuccessful != nil {
			repoSpec := sourcegraph.RepoSpec{URI: *repoBuildInfo.LastSuccessful.RepoURI, RID: repoBuildInfo.LastSuccessful.Repo}
			repoRevSpec = sourcegraph.RepoRevSpec{
				RepoSpec: repoSpec,
				Rev:      repoBuildInfo.LastSuccessful.CommitID,
				CommitID: repoBuildInfo.LastSuccessful.CommitID,
			}
			remoteRepoLabel = fmt.Sprintf("remote repository (URI %s, commit %s)", repoRevSpec.URI, repoRevSpec.CommitID)
			// Note: the remoteFS obtained above isn't used (it is overwritten
			// below), so there is no need to call getRemoteFileSystem() again
			// for the new commit.
			if GlobalOpt.Verbose {
				log.Printf("Setting fetch to latest build: %s", remoteRepoLabel)
			}
		}
	}
	remoteFS, err = cl.BuildData.FileSystem(repoRevSpec)
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Fetching remote build files for %s to %s...", remoteRepoLabel, localRepoLabel)
	}

	// TODO(sqs): check if file exists in local cache and don't fetch it if it does and if it is identical

	par := parallel.NewRun(8)
	w := fs.WalkFS(".", rwvfs.Walkable(remoteFS))
	for w.Step() {
		path := w.Path()
		if err := w.Err(); err != nil {
			if path == "." {
				log.Printf("# No build data to pull from %s", remoteRepoLabel)
				return nil
			}
			return fmt.Errorf("walking remote dir tree: %s", err)
		}
		fi := w.Stat()
		if fi == nil {
			continue
		}
		if !fi.Mode().IsRegular() {
			continue
		}
		par.Do(func() error {
			return fetchFile(remoteFS, localFS, path, fi, c.DryRun)
		})
	}
	if err := par.Wait(); err != nil {
		return fmt.Errorf("error fetching: %s", err)
	}
	return nil
}