func TestDelayedWalkTree(t *testing.T) {
	repodir, cleanup := Env(t, repoFixture)
	defer cleanup()

	repo := repository.TestOpenLocal(t, repodir)
	OK(t, repo.LoadIndex())

	root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
	OK(t, err)

	dr := delayRepo{repo, 100 * time.Millisecond}

	// start tree walker
	treeJobs := make(chan walk.TreeJob)
	go walk.Tree(dr, root, nil, treeJobs)

	i := 0
	for job := range treeJobs {
		expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...)
		if job.Path != expectedPath {
			t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path)
		}
		i++
	}

	if i != len(walktreeTestItems) {
		t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems))
	}
}

func BenchmarkDelayedWalkTree(t *testing.B) {
	repodir, cleanup := Env(t, repoFixture)
	defer cleanup()

	repo := repository.TestOpenLocal(t, repodir)
	OK(t, repo.LoadIndex())

	root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
	OK(t, err)

	dr := delayRepo{repo, 10 * time.Millisecond}

	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		// start tree walker
		treeJobs := make(chan walk.TreeJob)
		go walk.Tree(dr, root, nil, treeJobs)

		// drain the channel until the walker is done
		for range treeJobs {
		}
	}
}
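
// The two functions above construct a delayRepo around the opened repository
// to simulate a slow backend. The following is a minimal sketch of what such a
// wrapper could look like, assuming walk.Tree only needs a LoadTree method
// from its first argument; the field names and the exact set of delayed
// methods are assumptions, not the canonical upstream definition.
type delayRepo struct {
	repo  restic.Repository
	delay time.Duration
}

// LoadTree sleeps for the configured delay before delegating the tree load to
// the wrapped repository.
func (d delayRepo) LoadTree(id restic.ID) (*restic.Tree, error) {
	time.Sleep(d.delay)
	return d.repo.LoadTree(id)
}
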
func TestWalkTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	dirs, err := filepath.Glob(TestWalkerPath)
	OK(t, err)

	// archive a few files
	arch := archiver.New(repo)
	sn, _, err := arch.Snapshot(nil, dirs, nil, nil)
	OK(t, err)

	// flush repo, write all packs
	OK(t, repo.Flush())

	done := make(chan struct{})

	// start tree walker
	treeJobs := make(chan walk.TreeJob)
	go walk.Tree(repo, *sn.Tree, done, treeJobs)

	// start filesystem walker
	fsJobs := make(chan pipe.Job)
	resCh := make(chan pipe.Result, 1)

	f := func(string, os.FileInfo) bool {
		return true
	}
	go pipe.Walk(dirs, f, done, fsJobs, resCh)

	for {
		// receive fs job
		fsJob, fsChOpen := <-fsJobs
		Assert(t, !fsChOpen || fsJob != nil,
			"received nil job from filesystem: %v %v", fsJob, fsChOpen)
		if fsJob != nil {
			OK(t, fsJob.Error())
		}

		var path string
		fsEntries := 1
		switch j := fsJob.(type) {
		case pipe.Dir:
			path = j.Path()
			fsEntries = len(j.Entries)
		case pipe.Entry:
			path = j.Path()
		}

		// receive tree job
		treeJob, treeChOpen := <-treeJobs
		treeEntries := 1

		OK(t, treeJob.Error)

		if treeJob.Tree != nil {
			treeEntries = len(treeJob.Tree.Nodes)
		}

		Assert(t, fsChOpen == treeChOpen,
			"one channel closed too early: fsChOpen %v, treeChOpen %v",
			fsChOpen, treeChOpen)

		if !fsChOpen || !treeChOpen {
			break
		}

		Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path),
			"paths do not match: %q != %q",
			filepath.Base(path), filepath.Base(treeJob.Path))

		Assert(t, fsEntries == treeEntries,
			"wrong number of entries: %v != %v", fsEntries, treeEntries)
	}
}

// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(p *restic.Progress, paths, tags []string, parentID *restic.ID) (*restic.Snapshot, restic.ID, error) {
	paths = unique(paths)
	sort.Sort(baseNameSlice(paths))

	debug.Log("start for %v", paths)

	debug.RunHook("Archiver.Snapshot", nil)

	// signal the whole pipeline to stop
	done := make(chan struct{})
	var err error

	p.Start()
	defer p.Done()

	// create new snapshot
	sn, err := restic.NewSnapshot(paths, tags)
	if err != nil {
		return nil, restic.ID{}, err
	}
	sn.Excludes = arch.Excludes

	jobs := archivePipe{}

	// use parent snapshot (if some was given)
	if parentID != nil {
		sn.Parent = parentID

		// load parent snapshot
		parent, err := restic.LoadSnapshot(arch.repo, *parentID)
		if err != nil {
			return nil, restic.ID{}, err
		}

		// start walker on old tree
		ch := make(chan walk.TreeJob)
		go walk.Tree(arch.repo, *parent.Tree, done, ch)
		jobs.Old = ch
	} else {
		// use closed channel
		ch := make(chan walk.TreeJob)
		close(ch)
		jobs.Old = ch
	}

	// start walker
	pipeCh := make(chan pipe.Job)
	resCh := make(chan pipe.Result, 1)
	go func() {
		pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
		debug.Log("pipe.Walk done")
	}()
	jobs.New = pipeCh

	ch := make(chan pipe.Job)
	go jobs.compare(done, ch)

	var wg sync.WaitGroup
	entCh := make(chan pipe.Entry)
	dirCh := make(chan pipe.Dir)

	// split
	wg.Add(1)
	go func() {
		pipe.Split(ch, dirCh, entCh)
		debug.Log("split done")
		close(dirCh)
		close(entCh)
		wg.Done()
	}()

	// run workers
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(2)
		go arch.fileWorker(&wg, p, done, entCh)
		go arch.dirWorker(&wg, p, done, dirCh)
	}

	// run index saver
	var wgIndexSaver sync.WaitGroup
	stopIndexSaver := make(chan struct{})
	wgIndexSaver.Add(1)
	go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)

	// wait for all workers to terminate
	debug.Log("wait for workers")
	wg.Wait()

	// stop index saver
	close(stopIndexSaver)
	wgIndexSaver.Wait()

	debug.Log("workers terminated")

	// receive the top-level tree
	root := (<-resCh).(*restic.Node)
	debug.Log("root node received: %v", root.Subtree.Str())
	sn.Tree = root.Subtree

	// save snapshot
	id, err := arch.repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
	if err != nil {
		return nil, restic.ID{}, err
	}

	debug.Log("saved snapshot %v", id.Str())

	// flush repository
	err = arch.repo.Flush()
	if err != nil {
		return nil, restic.ID{}, err
	}

	// save index
	err = arch.repo.SaveIndex()
	if err != nil {
		debug.Log("error saving index: %v", err)
		return nil, restic.ID{}, err
	}

	debug.Log("saved indexes")

	return sn, id, nil
}
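
// For reference, the call pattern for Snapshot mirrors what TestWalkTree does
// above: construct an Archiver and pass nil for the progress, tags, and parent
// snapshot when none are needed. snapshotPaths is a hypothetical helper
// sketching that pattern, assuming the same package context and imports as the
// tests above; it is not part of the archiver API.
func snapshotPaths(repo restic.Repository, paths []string) (restic.ID, error) {
	arch := archiver.New(repo)

	// nil progress, no tags, no parent snapshot; Snapshot flushes the
	// repository and saves the index itself before returning.
	_, id, err := arch.Snapshot(nil, paths, nil, nil)
	if err != nil {
		return restic.ID{}, err
	}

	return id, nil
}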