// ListRepoPaths implements RepoPaths. func (defaultRepoPaths) ListRepoPaths(vfs rwvfs.WalkableFileSystem, after string, max int) ([][]string, error) { var paths [][]string w := fs.WalkFS(".", rwvfs.Walkable(vfs)) for w.Step() { if err := w.Err(); err != nil { return nil, err } fi := w.Stat() if w.Path() >= after && fi.Mode().IsDir() { if fi.Name() == SrclibStoreDir { w.SkipDir() paths = append(paths, strings.Split(filepath.ToSlash(w.Path()), "/")) if max != 0 && len(paths) >= max { break } continue } if fi.Name() != "." && strings.HasPrefix(fi.Name(), ".") { w.SkipDir() continue } } } return paths, nil }
// ListRepoPaths implements RepoPaths. func (defaultRepoPaths) ListRepoPaths(vfs rwvfs.WalkableFileSystem, after string, max int) ([][]string, error) { var paths [][]string w := fs.WalkFS(".", rwvfs.Walkable(vfs)) for w.Step() { if err := w.Err(); err != nil { return nil, err } fi := w.Stat() if w.Path() >= after && fi.Mode().IsDir() { if fi.Name() == SrclibStoreDir { w.SkipDir() // NOTE: This assumes that the vfs's path // separator is "/", which is not true in general. paths = append(paths, strings.Split(w.Path(), "/")) if max != 0 && len(paths) >= max { break } continue } if fi.Name() != "." && strings.HasPrefix(fi.Name(), ".") { w.SkipDir() continue } } } return paths, nil }
func TestBuildDataService_ListAll(t *testing.T) { setup() defer teardown() pathPrefix := urlPath(t, router.RepoBuildDataEntry, map[string]string{"RepoSpec": "r.com/x", "Rev": "c", "Path": "."}) fs := rwvfs.Map(map[string]string{ "a": "a", "b/c": "c", "b/d/e": "e", }) mux.Handle(pathPrefix+"/", http.StripPrefix(pathPrefix, rwvfs.HTTPHandler(fs, nil))) fs, err := client.BuildData.FileSystem(RepoRevSpec{RepoSpec: RepoSpec{URI: "r.com/x"}, Rev: "c"}) if err != nil { t.Fatal(err) } entries, err := rwvfs.StatAllRecursive(".", rwvfs.Walkable(fs)) if err != nil { t.Fatalf("StatAllRecursive returned error: %v", err) } names := fileInfoNames(entries) wantNames := []string{".", "a", "b", "b/c", "b/d", "b/d/e"} sort.Strings(names) sort.Strings(wantNames) if !reflect.DeepEqual(names, wantNames) { t.Errorf("got entry names %v, want %v", names, wantNames) } }
func newTestFS() rwvfs.WalkableFileSystem { switch *fsType { case "map": fs := rwvfs.Map(map[string]string{}) return rwvfs.Walkable(rwvfs.Sub(fs, "/testdata")) case "os": tmpDir, err := ioutil.TempDir("", "srclib-test") if err != nil { log.Fatal(err) } fs := rwvfs.OS(tmpDir) setCreateParentDirs(fs) return rwvfs.Walkable(fs) default: log.Fatalf("unrecognized -test.fs option: %q", *fsType) panic("unreachable") } }
// LocalRepo creates a new single-repository build store for the VCS // repository whose top-level directory is repoDir. // // The store is laid out as follows: // // . the root dir of repoStoreFS // <COMMITID>/**/* build data for a specific commit func LocalRepo(repoDir string) (RepoBuildStore, error) { storeDir := filepath.Join(repoDir, BuildDataDirName) if err := os.Mkdir(storeDir, 0700); err != nil && !os.IsExist(err) { return nil, err } fs := rwvfs.OS(storeDir) setCreateParentDirs(fs) return Repo(rwvfs.Walkable(fs)), nil }
// store returns the store specified by StoreCmd's Type and Root // options. func (c *StoreCmd) store() (interface{}, error) { fs := rwvfs.OS(c.Root) type createParents interface { CreateParentDirs(bool) } if fs, ok := fs.(createParents); ok { fs.CreateParentDirs(true) } switch c.Type { case "RepoStore": return store.NewFSRepoStore(rwvfs.Walkable(fs)), nil case "MultiRepoStore": return store.NewFSMultiRepoStore(rwvfs.Walkable(fs), nil), nil default: return nil, fmt.Errorf("unrecognized store --type value: %q (valid values are RepoStore, MultiRepoStore)", c.Type) } }
// Execute fetches the repository's remote build data files into the
// local build data store, walking the remote FS and copying each
// regular file concurrently.
func (c *BuildDataFetchCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}
	remoteFS, remoteRepoLabel, repoRevSpec, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}

	// Use uncached API client because the .srclib-cache already
	// caches it, and we want to be able to stream large files.
	//
	// TODO(sqs): this uncached client isn't authed because it doesn't
	// have the other API client's http.Client or http.RoundTripper
	cl := newAPIClientWithAuth(false)
	// NOTE(review): this reassignment discards the remoteFS returned by
	// getRemoteFileSystem above; only its labels/spec were used.
	remoteFS, err = cl.BuildData.FileSystem(repoRevSpec)
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Fetching remote build files for %s to %s...", remoteRepoLabel, localRepoLabel)
	}

	// TODO(sqs): check if file exists in local cache and don't fetch it if it does and if it is identical

	// Fetch up to 8 files concurrently.
	par := parallel.NewRun(8)
	w := fs.WalkFS(".", rwvfs.Walkable(remoteFS))
	for w.Step() {
		path := w.Path()
		if err := w.Err(); err != nil {
			if path == "." {
				// An error at the root means there is no remote build
				// data at all; treat that as a successful no-op.
				log.Printf("# No build data to pull from %s", remoteRepoLabel)
				return nil
			}
			return fmt.Errorf("walking remote dir tree: %s", err)
		}
		fi := w.Stat()
		if fi == nil {
			continue
		}
		if !fi.Mode().IsRegular() {
			// Only regular files are fetched; dirs are created implicitly.
			continue
		}
		// path and fi are per-iteration variables, so capturing them in
		// the closure is safe.
		par.Do(func() error {
			return fetchFile(remoteFS, localFS, path, fi, c.DryRun)
		})
	}
	if err := par.Wait(); err != nil {
		return fmt.Errorf("error fetching: %s", err)
	}
	return nil
}
// ReadCached reads a Tree's configuration from all of its source unit
// definition files (which may either be in a local VFS rooted at a
// .srclib-cache/<COMMITID> dir, or a remote VFS). It does not read
// the Srcfile; the Srcfile's directives are already accounted for in
// the cached source unit definition files.
//
// bdfs should be a VFS obtained from a call to
// (buildstore.RepoBuildStore).Commit.
func ReadCached(bdfs vfs.FileSystem) (*Tree, error) {
	// Fail fast with a helpful message if the cache dir is missing.
	if _, err := bdfs.Lstat("."); os.IsNotExist(err) {
		return nil, fmt.Errorf("build cache dir does not exist (did you run `srclib config` to create it)?")
	} else if err != nil {
		return nil, err
	}

	// Collect all **/*.unit.json files.
	var unitFiles []string
	unitSuffix := buildstore.DataTypeSuffix(unit.SourceUnit{})
	w := fs.WalkFS(".", rwvfs.Walkable(rwvfs.ReadOnly(bdfs)))
	for w.Step() {
		if err := w.Err(); err != nil {
			return nil, err
		}
		if path := w.Path(); strings.HasSuffix(path, unitSuffix) {
			unitFiles = append(unitFiles, path)
		}
	}

	// Parse units concurrently; sorting first makes the resulting
	// units slice order deterministic (units[i] matches unitFiles[i]).
	sort.Strings(unitFiles)
	units := make([]*unit.SourceUnit, len(unitFiles))
	par := parallel.NewRun(runtime.GOMAXPROCS(0))
	for i_, unitFile_ := range unitFiles {
		// Copy the loop variables so each goroutine captures its own
		// values (required pre-Go 1.22).
		i, unitFile := i_, unitFile_
		par.Acquire()
		go func() {
			defer par.Release()
			f, err := bdfs.Open(unitFile)
			if err != nil {
				par.Error(err)
				return
			}
			if err := json.NewDecoder(f).Decode(&units[i]); err != nil {
				// Best-effort close; the decode error takes precedence.
				f.Close()
				par.Error(err)
				return
			}
			if err := f.Close(); err != nil {
				par.Error(err)
				return
			}
		}()
	}
	if err := par.Wait(); err != nil {
		return nil, err
	}
	return &Tree{SourceUnits: units}, nil
}
func (s *fsTreeStore) unitFilenames() ([]string, error) { var files []string w := fs.WalkFS(".", rwvfs.Walkable(s.fs)) for w.Step() { if err := w.Err(); err != nil { return nil, err } fi := w.Stat() if fi.Mode().IsRegular() && strings.HasSuffix(fi.Name(), unitFileSuffix) { files = append(files, filepath.ToSlash(w.Path())) } } return files, nil }
// Commit returns a walkable VFS rooted at the build data directory
// for commitID, dereferencing the commit path if it is a symlink.
func (s *repoBuildStore) Commit(commitID string) rwvfs.WalkableFileSystem {
	path := s.commitPath(commitID)

	// Dereference path if path refers to a symlink, so that we can
	// walk the tree.
	// Lstat error is deliberately ignored: a missing path simply means
	// no symlink handling is needed.
	e, _ := s.fs.Lstat(path)
	if e != nil && e.Mode()&os.ModeSymlink != 0 {
		if fs, ok := s.fs.(rwvfs.LinkFS); ok {
			var err error
			dst, err := fs.ReadLink(path)
			if err == nil {
				path = dst
			} else if err == rwvfs.ErrOutsideRoot && FollowCrossFSSymlinks {
				// The link target lies outside this VFS's root; fall
				// back to an OS filesystem rooted at the target.
				return rwvfs.Walkable(rwvfs.OS(dst))
			} else {
				// Unreadable link: log and fall through to use the
				// non-dereferenced path.
				log.Printf("Failed to read symlink %s: %s. Using non-dereferenced path.", path, err)
			}
		} else {
			log.Printf("Repository build store path for commit %s is a symlink, but the current VFS %s doesn't support dereferencing symlinks.", commitID, s.fs)
		}
	}
	return rwvfs.Walkable(rwvfs.Sub(s.fs, path))
}
func (c *BuildDataRemoveCmd) Execute(args []string) error { if len(c.Args.Files) == 0 && !c.All { return fmt.Errorf("no files specified") } if c.All { if !c.Local { return fmt.Errorf("--all and --local must be used together") } lrepo, err := openLocalRepo() if err != nil { return err } if err := os.RemoveAll(filepath.Join(lrepo.RootDir, store.SrclibStoreDir)); err != nil { return err } if err := os.RemoveAll(filepath.Join(lrepo.RootDir, buildstore.BuildDataDirName)); err != nil { return err } return nil } bdfs, repoLabel, err := c.getFileSystem() if err != nil { return err } if GlobalOpt.Verbose { log.Printf("Removing build files %v for %s", c.Args.Files, repoLabel) } vfs := removeLoggedFS{rwvfs.Walkable(bdfs)} for _, file := range c.Args.Files { if c.Recursive { if err := buildstore.RemoveAll(file, vfs); err != nil { return err } } else { if err := vfs.Remove(file); err != nil { return err } } } return nil }
func TestMap_Walk2(t *testing.T) { m := map[string]string{"a/b/c/d": "a"} mapFS := rwvfs.Map(m) var names []string w := fs.WalkFS(".", rwvfs.Walkable(rwvfs.Sub(mapFS, "a/b"))) for w.Step() { if err := w.Err(); err != nil { t.Fatalf("walk path %q: %s", w.Path(), err) } names = append(names, w.Path()) } wantNames := []string{".", "c", "c/d"} sort.Strings(names) sort.Strings(wantNames) if !reflect.DeepEqual(names, wantNames) { t.Errorf("got entry names %v, want %v", names, wantNames) } }
func Glob(t *testing.T, fs rwvfs.FileSystem) { label := fmt.Sprintf("%T", fs) files := []string{"x/y/0.txt", "x/y/1.txt", "x/2.txt"} for _, file := range files { err := rwvfs.MkdirAll(fs, filepath.Dir(file)) if err != nil { t.Fatalf("%s: MkdirAll: %s", label, err) } w, err := fs.Create(file) if err != nil { t.Errorf("%s: Create(%q): %s", label, file, err) return } w.Close() } globTests := []struct { prefix string pattern string matches []string }{ {"", "x/y/*.txt", []string{"x/y/0.txt", "x/y/1.txt"}}, {"x/y", "x/y/*.txt", []string{"x/y/0.txt", "x/y/1.txt"}}, {"", "x/*", []string{"x/y", "x/2.txt"}}, } for _, test := range globTests { matches, err := rwvfs.Glob(rwvfs.Walkable(fs), test.prefix, test.pattern) if err != nil { t.Errorf("%s: Glob(prefix=%q, pattern=%q): %s", label, test.prefix, test.pattern, err) continue } sort.Strings(test.matches) sort.Strings(matches) if !reflect.DeepEqual(matches, test.matches) { t.Errorf("%s: Glob(prefix=%q, pattern=%q): got %v, want %v", label, test.prefix, test.pattern, matches, test.matches) } } }
func TestOS_ReadLink_walkable(t *testing.T) { tmpdir, err := ioutil.TempDir("", "rwvfs-test-") if err != nil { t.Fatal("TempDir", err) } defer os.RemoveAll(tmpdir) if err := ioutil.WriteFile(filepath.Join(tmpdir, "myfile"), []byte("hello"), 0600); err != nil { t.Fatal(err) } if err := os.Symlink(filepath.Join(tmpdir, "myfile"), filepath.Join(tmpdir, "mylink")); err != nil { t.Fatal(err) } osfs := rwvfs.OS(tmpdir) dst, err := rwvfs.Walkable(osfs).(rwvfs.LinkFS).ReadLink("mylink") if err != nil { t.Fatal(err) } if want := "myfile"; dst != want { t.Errorf("%s: ReadLink: got %q, want %q", osfs, dst, want) } }
func (c *BuildDataUploadCmd) Execute(args []string) error { localFS, localRepoLabel, err := c.getLocalFileSystem() if err != nil { return err } remoteFS, remoteRepoLabel, _, err := c.getRemoteFileSystem() if err != nil { return err } if GlobalOpt.Verbose { log.Printf("Uploading build files from %s to %s...", localRepoLabel, remoteRepoLabel) } // TODO(sqs): check if file exists remotely and don't upload it if it does and if it is identical par := parallel.NewRun(8) w := fs.WalkFS(".", rwvfs.Walkable(localFS)) for w.Step() { if err := w.Err(); err != nil { return err } fi := w.Stat() if fi == nil { continue } if !fi.Mode().IsRegular() { continue } path := w.Path() par.Do(func() error { return uploadFile(localFS, remoteFS, path, fi, c.DryRun) }) } return par.Wait() }
// NewMulti creates a new multi-repo build store. func NewMulti(fs rwvfs.FileSystem) *MultiStore { return &MultiStore{rwvfs.Walkable(fs)} }
// Execute fetches the repository's remote build data files into the
// local build data store. With --latest, it retargets the fetch at the
// commit of the last successful remote build before downloading.
func (c *BuildDataFetchCmd) Execute(args []string) error {
	localFS, localRepoLabel, err := c.getLocalFileSystem()
	if err != nil {
		return err
	}
	remoteFS, remoteRepoLabel, repoRevSpec, err := c.getRemoteFileSystem()
	if err != nil {
		return err
	}

	// Use uncached API client because the .srclib-cache already
	// caches it, and we want to be able to stream large files.
	//
	// TODO(sqs): this uncached client isn't authed because it doesn't
	// have the other API client's http.Client or http.RoundTripper
	cl := newAPIClientWithAuth(false)

	if c.Latest {
		// Look up the latest build (Exact: false allows a non-exact
		// commit match) and, if one succeeded, fetch that commit's
		// build data instead of the current rev's.
		repoBuildInfo, _, err := cl.Repos.GetBuild(repoRevSpec, &sourcegraph.RepoGetBuildOptions{Exact: false})
		if err != nil {
			return err
		}
		if repoBuildInfo.LastSuccessful != nil {
			repoSpec := sourcegraph.RepoSpec{URI: *repoBuildInfo.LastSuccessful.RepoURI, RID: repoBuildInfo.LastSuccessful.Repo}
			repoRevSpec = sourcegraph.RepoRevSpec{
				RepoSpec: repoSpec,
				Rev:      repoBuildInfo.LastSuccessful.CommitID,
				CommitID: repoBuildInfo.LastSuccessful.CommitID,
			}
			remoteRepoLabel = fmt.Sprintf("remote repository (URI %s, commit %s)", repoRevSpec.URI, repoRevSpec.CommitID)
			// note remoteFS isn't used, otherwise we should recall getRemoteFileSystem() on new Commit
			if GlobalOpt.Verbose {
				log.Printf("Setting fetch to latest build: %s", remoteRepoLabel)
			}
		}
	}

	// Re-open the remote FS at the (possibly retargeted) revision; this
	// replaces the remoteFS returned by getRemoteFileSystem above.
	remoteFS, err = cl.BuildData.FileSystem(repoRevSpec)
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Fetching remote build files for %s to %s...", remoteRepoLabel, localRepoLabel)
	}

	// TODO(sqs): check if file exists in local cache and don't fetch it if it does and if it is identical

	// Fetch up to 8 files concurrently.
	par := parallel.NewRun(8)
	w := fs.WalkFS(".", rwvfs.Walkable(remoteFS))
	for w.Step() {
		path := w.Path()
		if err := w.Err(); err != nil {
			if path == "." {
				// An error at the root means there is no remote build
				// data at all; treat that as a successful no-op.
				log.Printf("# No build data to pull from %s", remoteRepoLabel)
				return nil
			}
			return fmt.Errorf("walking remote dir tree: %s", err)
		}
		fi := w.Stat()
		if fi == nil {
			continue
		}
		if !fi.Mode().IsRegular() {
			// Only regular files are fetched; dirs are created implicitly.
			continue
		}
		// path and fi are per-iteration variables, so capturing them in
		// the closure is safe.
		par.Do(func() error {
			return fetchFile(remoteFS, localFS, path, fi, c.DryRun)
		})
	}
	if err := par.Wait(); err != nil {
		return fmt.Errorf("error fetching: %s", err)
	}
	return nil
}
// Execute lists build data files for the repository, either one
// directory (default) or recursively, in short or long format, and
// optionally with their remote URLs.
func (c *BuildDataListCmd) Execute(args []string) error {
	if c.URLs && c.Local {
		return fmt.Errorf("using --urls is incompatible with the build-data -l/--local option because local build data files do not have a URL")
	}
	if c.URLs {
		// URLs are only shown in the long output format.
		c.Long = true
	}

	dir := c.Args.Dir
	if dir == "" {
		dir = "."
	}

	bdfs, repoLabel, err := c.getFileSystem()
	if err != nil {
		return err
	}

	if GlobalOpt.Verbose {
		log.Printf("Listing build files for %s in dir %q", repoLabel, dir)
	}

	// Only used for constructing the URLs for remote build data.
	var repoRevSpec sourcegraph.RepoRevSpec
	if !c.Local {
		cl := NewAPIClientWithAuthIfPresent()
		rrepo, err := getRemoteRepo(cl)
		if err != nil {
			return err
		}
		repoRevSpec.RepoSpec = rrepo.RepoSpec()

		lrepo, err := openLocalRepo()
		if err != nil {
			return err
		}
		repoRevSpec.Rev = lrepo.CommitID
		repoRevSpec.CommitID = lrepo.CommitID
	}

	// printFile prints a single entry, honoring the --type filter and
	// the long/URL output options.
	printFile := func(fi os.FileInfo) {
		if c.Type == "f" && !fi.Mode().IsRegular() {
			return
		}
		if c.Type == "d" && !fi.Mode().IsDir() {
			return
		}

		// Directories get a trailing separator, ls -p style.
		var suffix string
		if fi.IsDir() {
			suffix = string(filepath.Separator)
		}

		var urlStr string
		if c.URLs {
			spec := sourcegraph.BuildDataFileSpec{RepoRev: repoRevSpec, Path: filepath.Join(dir, fi.Name())}

			// TODO(sqs): use sourcegraph.Router when it is merged to go-sourcegraph master
			u, err := router.NewAPIRouter(nil).Get(router.RepoBuildDataEntry).URLPath(router.MapToArray(spec.RouteVars())...)
			if err != nil {
				log.Fatal(err)
			}

			// Strip leading "/" so that the URL is relative to the
			// endpoint URL even if the endpoint URL contains a path.
			urlStr = getEndpointURL().ResolveReference(&url.URL{Path: u.Path[1:]}).String()
		}

		if c.Long {
			var timeStr string
			if !fi.ModTime().IsZero() {
				timeStr = fi.ModTime().Format("Jan _2 15:04")
			}
			fmt.Printf("% 7d %12s %s%s %s\n", fi.Size(), timeStr, fi.Name(), suffix, urlStr)
		} else {
			fmt.Println(fi.Name() + suffix)
		}
	}

	var fis []os.FileInfo
	if c.Recursive {
		w := fs.WalkFS(dir, rwvfs.Walkable(bdfs))
		for w.Step() {
			if err := w.Err(); err != nil {
				return err
			}
			printFile(treeFileInfo{w.Path(), w.Stat()})
		}
	} else {
		fis, err = bdfs.ReadDir(dir)
		if err != nil {
			return err
		}
		for _, fi := range fis {
			printFile(fi)
		}
	}
	return nil
}
func (s *MultiStore) RepoBuildStore(repoURI string) (RepoBuildStore, error) { path := filepath.Clean(string(repoURI)) return Repo(rwvfs.Walkable(rwvfs.Sub(s.fs, path))), nil }
func (s *fsMultiRepoStore) openRepoStore(repo string) RepoStore { subpath := s.fs.Join(s.RepoToPath(repo)...) return NewFSRepoStore(rwvfs.Walkable(rwvfs.Sub(s.fs, subpath))) }