// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.Node, error) {
	file, err := fs.Open(node.Path)
	if err != nil {
		return node, errors.Wrap(err, "Open")
	}
	defer file.Close()

	debug.RunHook("archiver.SaveFile", node.Path)

	node, err = arch.reloadFileIfChanged(node, file)
	if err != nil {
		return node, err
	}

	chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
	resultChannels := [](<-chan saveResult){}

	for {
		chunk, err := chnker.Next(getBuf())
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			return node, errors.Wrap(err, "chunker.Next")
		}

		resCh := make(chan saveResult, 1)
		go arch.saveChunk(chunk, p, <-arch.blobToken, file, resCh)
		resultChannels = append(resultChannels, resCh)
	}

	results, err := waitForResults(resultChannels)
	if err != nil {
		return node, err
	}
	err = updateNodeContent(node, results)

	return node, err
}
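// SaveFile leans on two helpers, waitForResults and updateNodeContent, that
// are not shown here. Below is a minimal sketch of the collection side,
// assuming saveResult carries the saved blob's ID and size; the exact field
// names and the errors.Errorf call are assumptions for illustration, not
// necessarily the actual implementation.
type saveResult struct {
	id    restic.ID // assumed: ID of the saved blob
	bytes uint64    // assumed: number of bytes stored
}

// waitForResults drains one result per chunk, in submission order, so that
// the node's content list preserves the order of the chunks in the file.
func waitForResults(resultChannels [](<-chan saveResult)) ([]saveResult, error) {
	results := []saveResult{}
	for _, ch := range resultChannels {
		results = append(results, <-ch)
	}

	if len(results) != len(resultChannels) {
		return nil, errors.Errorf("chunker returned %v chunks, but only %v blobs saved", len(resultChannels), len(results))
	}

	return results, nil
}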
// Snapshot creates a snapshot of the given paths. If parentID is set, this is
// used to compare the files to the ones archived at the time this snapshot was
// taken.
func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) {
	paths = unique(paths)
	sort.Sort(baseNameSlice(paths))

	debug.Log("Archiver.Snapshot", "start for %v", paths)

	debug.RunHook("Archiver.Snapshot", nil)

	// signal the whole pipeline to stop
	done := make(chan struct{})

	var err error

	p.Start()
	defer p.Done()

	// create new snapshot
	sn, err := NewSnapshot(paths)
	if err != nil {
		return nil, backend.ID{}, err
	}
	sn.Excludes = arch.Excludes

	jobs := archivePipe{}

	// use parent snapshot (if some was given)
	if parentID != nil {
		sn.Parent = parentID

		// load parent snapshot
		parent, err := LoadSnapshot(arch.repo, *parentID)
		if err != nil {
			return nil, backend.ID{}, err
		}

		// start walker on old tree
		ch := make(chan WalkTreeJob)
		go WalkTree(arch.repo, *parent.Tree, done, ch)
		jobs.Old = ch
	} else {
		// use closed channel
		ch := make(chan WalkTreeJob)
		close(ch)
		jobs.Old = ch
	}

	// start walker
	pipeCh := make(chan pipe.Job)
	resCh := make(chan pipe.Result, 1)
	go func() {
		pipe.Walk(paths, arch.SelectFilter, done, pipeCh, resCh)
		debug.Log("Archiver.Snapshot", "pipe.Walk done")
	}()
	jobs.New = pipeCh

	ch := make(chan pipe.Job)
	go jobs.compare(done, ch)

	var wg sync.WaitGroup
	entCh := make(chan pipe.Entry)
	dirCh := make(chan pipe.Dir)

	// split
	wg.Add(1)
	go func() {
		pipe.Split(ch, dirCh, entCh)
		debug.Log("Archiver.Snapshot", "split done")
		close(dirCh)
		close(entCh)
		wg.Done()
	}()

	// run workers
	for i := 0; i < maxConcurrency; i++ {
		wg.Add(2)
		go arch.fileWorker(&wg, p, done, entCh)
		go arch.dirWorker(&wg, p, done, dirCh)
	}

	// run index saver
	var wgIndexSaver sync.WaitGroup
	stopIndexSaver := make(chan struct{})
	wgIndexSaver.Add(1)
	go arch.saveIndexes(&wgIndexSaver, stopIndexSaver)

	// wait for all workers to terminate
	debug.Log("Archiver.Snapshot", "wait for workers")
	wg.Wait()

	// stop index saver
	close(stopIndexSaver)
	wgIndexSaver.Wait()

	debug.Log("Archiver.Snapshot", "workers terminated")

	// receive the top-level tree
	root := (<-resCh).(*Node)
	debug.Log("Archiver.Snapshot", "root node received: %v", root.Subtree.Str())
	sn.Tree = root.Subtree

	// save snapshot
	id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, backend.ID{}, err
	}

	// store ID in snapshot struct
	sn.id = &id
	debug.Log("Archiver.Snapshot", "saved snapshot %v", id.Str())

	// flush repository
	err = arch.repo.Flush()
	if err != nil {
		return nil, backend.ID{}, err
	}

	// save index
	err = arch.repo.SaveIndex()
	if err != nil {
		debug.Log("Archiver.Snapshot", "error saving index: %v", err)
		return nil, backend.ID{}, err
	}

	debug.Log("Archiver.Snapshot", "saved indexes")

	return sn, id, nil
}
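// A hypothetical caller, sketching how Snapshot is driven end to end: build
// an Archiver for an open repository, optionally hand in the ID of a parent
// snapshot for change detection, and receive the new snapshot plus its ID.
// NewArchiver and NewProgress are assumed names for this package's
// constructors, used here only for illustration.
func runBackup(repo *repository.Repository, paths []string, parent *backend.ID) error {
	arch := NewArchiver(repo)          // assumed constructor
	p := NewProgress(time.Second)      // assumed: progress reports once per second

	sn, id, err := arch.Snapshot(p, paths, parent)
	if err != nil {
		return err
	}

	fmt.Printf("created snapshot %v of %v\n", id.Str(), sn.Paths)
	return nil
}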
func walk(basedir, dir string, selectFunc SelectFunc, done <-chan struct{}, jobs chan<- Job, res chan<- Result) (excluded bool) {
	debug.Log("pipe.walk", "start on %q, basedir %q", dir, basedir)

	relpath, err := filepath.Rel(basedir, dir)
	if err != nil {
		panic(err)
	}

	info, err := os.Lstat(dir)
	if err != nil {
		debug.Log("pipe.walk", "error for %v: %v, res %p", dir, err, res)
		select {
		case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
		case <-done:
		}
		return
	}

	if !selectFunc(dir, info) {
		debug.Log("pipe.walk", "file %v excluded by filter, res %p", dir, res)
		excluded = true
		return
	}

	if !info.IsDir() {
		debug.Log("pipe.walk", "sending file job for %v, res %p", dir, res)
		select {
		case jobs <- Entry{info: info, basedir: basedir, path: relpath, result: res}:
		case <-done:
		}
		return
	}

	debug.RunHook("pipe.readdirnames", dir)
	names, err := readDirNames(dir)
	if err != nil {
		debug.Log("pipe.walk", "Readdirnames(%v) returned error: %v, res %p", dir, err, res)
		select {
		case <-done:
		case jobs <- Dir{basedir: basedir, path: relpath, info: info, error: err, result: res}:
		}
		return
	}

	// Insert breakpoint to allow testing behaviour with vanishing files
	// between Readdir() and lstat()
	debug.RunHook("pipe.walk1", relpath)

	entries := make([]<-chan Result, 0, len(names))
	for _, name := range names {
		subpath := filepath.Join(dir, name)

		fi, statErr := os.Lstat(subpath)
		if !selectFunc(subpath, fi) {
			debug.Log("pipe.walk", "file %v excluded by filter", subpath)
			continue
		}

		ch := make(chan Result, 1)
		entries = append(entries, ch)

		if statErr != nil {
			debug.Log("pipe.walk", "sending file job for %v, err %v, res %p", subpath, statErr, res)
			select {
			case jobs <- Entry{info: fi, error: statErr, basedir: basedir, path: filepath.Join(relpath, name), result: ch}:
			case <-done:
				return
			}
			continue
		}

		// Insert breakpoint to allow testing behaviour with vanishing files
		// between walk and open
		debug.RunHook("pipe.walk2", filepath.Join(relpath, name))

		walk(basedir, subpath, selectFunc, done, jobs, ch)
	}

	debug.Log("pipe.walk", "sending dirjob for %q, basedir %q, res %p", dir, basedir, res)
	select {
	case jobs <- Dir{basedir: basedir, path: relpath, info: info, Entries: entries, result: res}:
	case <-done:
	}

	return
}
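// readDirNames is referenced above but not shown. A plausible minimal
// implementation, sketched here under the assumption that the walk wants a
// deterministic order: read all entry names from the directory, then sort
// them before returning.
func readDirNames(dir string) ([]string, error) {
	f, err := os.Open(dir)
	if err != nil {
		return nil, err
	}

	names, err := f.Readdirnames(-1)
	f.Close()
	if err != nil {
		return nil, err
	}

	sort.Strings(names)
	return names, nil
}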