func archiveDirectory(b testing.TB) {
    repo, cleanup := repository.TestRepository(b)
    defer cleanup()

    arch := archiver.New(repo)

    _, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, nil)
    OK(b, err)

    b.Logf("snapshot archived as %v", id)
}
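// A hypothetical benchmark entry point for the helper above (not part of the
// original excerpt): archiveDirectory takes testing.TB precisely so it can be
// driven from both tests and benchmarks. Skipping when BenchArchiveDirectory
// is unset is an assumption about how the harness is configured.
func BenchmarkArchiveDirectory(b *testing.B) {
    if BenchArchiveDirectory == "" {
        b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
    }

    for i := 0; i < b.N; i++ {
        archiveDirectory(b)
    }
}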
func TestCheckerModifiedData(t *testing.T) {
    be := mem.New()

    repository.TestUseLowSecurityKDFParameters(t)

    repo := repository.New(be)
    test.OK(t, repo.Init(test.TestPassword))

    arch := archiver.New(repo)
    _, id, err := arch.Snapshot(nil, []string{"."}, nil, nil)
    test.OK(t, err)
    t.Logf("archived as %v", id.Str())

    // reopen the repository through a backend that can corrupt reads on demand
    beError := &errorBackend{Backend: be}
    checkRepo := repository.New(beError)
    test.OK(t, checkRepo.SearchKey(test.TestPassword, 5))

    chkr := checker.New(checkRepo)

    hints, errs := chkr.LoadIndex()
    if len(errs) > 0 {
        t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
    }

    if len(hints) > 0 {
        t.Errorf("expected no hints, got %v: %v", len(hints), hints)
    }

    beError.ProduceErrors = true
    errFound := false
    for _, err := range checkPacks(chkr) {
        t.Logf("pack error: %v", err)
    }

    for _, err := range checkStruct(chkr) {
        t.Logf("struct error: %v", err)
    }

    for _, err := range checkData(chkr) {
        t.Logf("data error: %v", err)
        errFound = true
    }

    if !errFound {
        t.Fatal("no error found, checker is broken")
    }
}
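// errorBackend is used above but not defined in this excerpt. This is a
// minimal sketch of one possible implementation; the exact restic.Backend
// method set (Load with an offset here) is an assumption. It wraps a working
// backend and, once ProduceErrors is set, flips a bit in everything read
// through it, which is exactly the corruption checkData must detect.
type errorBackend struct {
    restic.Backend
    ProduceErrors bool
}

func (b *errorBackend) Load(h restic.Handle, p []byte, off int64) (int, error) {
    n, err := b.Backend.Load(h, p, off)
    if b.ProduceErrors && n > 0 {
        // flip one bit so the content no longer matches its ID
        p[0] ^= 1
    }
    return n, err
}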
func testParallelSaveWithDuplication(t *testing.T, seed int) {
    repo, cleanup := repository.TestRepository(t)
    defer cleanup()

    dataSizeMb := 128
    duplication := 7

    arch := archiver.New(repo)
    chunks := getRandomData(seed, dataSizeMb*1024*1024)

    errChannels := [](<-chan error){}

    // interleaved processing of subsequent chunks
    maxParallel := 2*duplication - 1
    barrier := make(chan struct{}, maxParallel)

    for _, c := range chunks {
        for dupIdx := 0; dupIdx < duplication; dupIdx++ {
            errChan := make(chan error)
            errChannels = append(errChannels, errChan)

            go func(c chunker.Chunk, errChan chan<- error) {
                barrier <- struct{}{}
                id := restic.Hash(c.Data)
                // delay by a few nanoseconds depending on the blob ID, so
                // that saves of duplicated blobs complete in varying order
                time.Sleep(time.Duration(id[0]))
                err := arch.Save(restic.DataBlob, c.Data, id)
                <-barrier
                errChan <- err
            }(c, errChan)
        }
    }

    for _, errChan := range errChannels {
        OK(t, <-errChan)
    }

    OK(t, repo.Flush())
    OK(t, repo.SaveIndex())

    chkr := createAndInitChecker(t, repo)
    assertNoUnreferencedPacks(t, chkr)
}
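// getRandomData is called above but not part of this excerpt. A sketch under
// stated assumptions: deterministic bytes derived from the seed (math/rand,
// imported as mathrand), split into content-defined chunks with the restic
// chunker; testPol is a hypothetical package-level chunker polynomial.
// Assumed imports: bytes, io, math/rand.
func getRandomData(seed int, size int) []chunker.Chunk {
    buf := make([]byte, size)
    rnd := mathrand.New(mathrand.NewSource(int64(seed)))
    rnd.Read(buf)

    var chunks []chunker.Chunk
    chk := chunker.New(bytes.NewReader(buf), testPol)
    for {
        c, err := chk.Next(nil)
        if err == io.EOF {
            break
        }
        chunks = append(chunks, c)
    }

    return chunks
}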
func TestWalkTree(t *testing.T) {
    repo, cleanup := repository.TestRepository(t)
    defer cleanup()

    dirs, err := filepath.Glob(TestWalkerPath)
    OK(t, err)

    // archive a few files
    arch := archiver.New(repo)
    sn, _, err := arch.Snapshot(nil, dirs, nil, nil)
    OK(t, err)

    // flush repo, write all packs
    OK(t, repo.Flush())

    done := make(chan struct{})

    // start tree walker
    treeJobs := make(chan walk.TreeJob)
    go walk.Tree(repo, *sn.Tree, done, treeJobs)

    // start filesystem walker
    fsJobs := make(chan pipe.Job)
    resCh := make(chan pipe.Result, 1)

    f := func(string, os.FileInfo) bool {
        return true
    }
    go pipe.Walk(dirs, f, done, fsJobs, resCh)

    // drain both walkers in lock step and compare the jobs they emit: the
    // archived tree must mirror the filesystem exactly
    for {
        // receive fs job
        fsJob, fsChOpen := <-fsJobs
        Assert(t, !fsChOpen || fsJob != nil,
            "received nil job from filesystem: %v %v", fsJob, fsChOpen)
        if fsJob != nil {
            OK(t, fsJob.Error())
        }

        var path string
        fsEntries := 1
        switch j := fsJob.(type) {
        case pipe.Dir:
            path = j.Path()
            fsEntries = len(j.Entries)
        case pipe.Entry:
            path = j.Path()
        }

        // receive tree job
        treeJob, treeChOpen := <-treeJobs
        treeEntries := 1

        OK(t, treeJob.Error)

        if treeJob.Tree != nil {
            treeEntries = len(treeJob.Tree.Nodes)
        }

        Assert(t, fsChOpen == treeChOpen,
            "one channel closed too early: fsChOpen %v, treeChOpen %v",
            fsChOpen, treeChOpen)

        if !fsChOpen || !treeChOpen {
            break
        }

        Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path),
            "paths do not match: %q != %q", filepath.Base(path), filepath.Base(treeJob.Path))

        Assert(t, fsEntries == treeEntries,
            "wrong number of entries: %v != %v", fsEntries, treeEntries)
    }
}
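// OK and Assert are the shared test helpers used throughout these tests but
// not defined in this excerpt. A minimal sketch of their assumed semantics:
// OK fails the test on a non-nil error, Assert fails it when the condition
// does not hold.
func OK(tb testing.TB, err error) {
    if err != nil {
        tb.Fatalf("unexpected error: %+v", err)
    }
}

func Assert(tb testing.TB, condition bool, msg string, args ...interface{}) {
    if !condition {
        tb.Fatalf(msg, args...)
    }
}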
func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
    fromfile, err := readLinesFromFile(opts.FilesFrom)
    if err != nil {
        return err
    }

    // merge files from files-from into normal args so we can reuse the normal
    // args checks and have the ability to use both files-from and args at the
    // same time
    args = append(args, fromfile...)
    if len(args) == 0 {
        return errors.Fatal("wrong number of parameters")
    }

    target := make([]string, 0, len(args))
    for _, d := range args {
        if a, err := filepath.Abs(d); err == nil {
            d = a
        }
        target = append(target, d)
    }

    target, err = filterExisting(target)
    if err != nil {
        return err
    }

    // allowed devices
    var allowedDevs map[uint64]struct{}
    if opts.ExcludeOtherFS {
        allowedDevs, err = gatherDevices(target)
        if err != nil {
            return err
        }
        debug.Log("allowed devices: %v\n", allowedDevs)
    }

    repo, err := OpenRepository(gopts)
    if err != nil {
        return err
    }

    lock, err := lockRepo(repo)
    defer unlockRepo(lock)
    if err != nil {
        return err
    }

    err = repo.LoadIndex()
    if err != nil {
        return err
    }

    var parentSnapshotID *restic.ID

    // use the parent snapshot given on the command line (unless --force is set)
    if !opts.Force && opts.Parent != "" {
        id, err := restic.FindSnapshot(repo, opts.Parent)
        if err != nil {
            return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
        }

        parentSnapshotID = &id
    }

    // find the latest snapshot and use it as parent, if not already set
    if !opts.Force && parentSnapshotID == nil {
        id, err := restic.FindLatestSnapshot(repo, target, "")
        if err == nil {
            parentSnapshotID = &id
        } else if err != restic.ErrNoSnapshotFound {
            return err
        }
    }

    if parentSnapshotID != nil {
        Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
    }

    Verbosef("scan %v\n", target)

    // add patterns from file
    if opts.ExcludeFile != "" {
        file, err := fs.Open(opts.ExcludeFile)
        if err != nil {
            Warnf("error reading exclude patterns: %v", err)
            return nil
        }
        defer file.Close()

        scanner := bufio.NewScanner(file)
        for scanner.Scan() {
            line := scanner.Text()
            if !strings.HasPrefix(line, "#") {
                line = os.ExpandEnv(line)
                opts.Excludes = append(opts.Excludes, line)
            }
        }
    }

    selectFilter := func(item string, fi os.FileInfo) bool {
        matched, err := filter.List(opts.Excludes, item)
        if err != nil {
            Warnf("error for exclude pattern: %v", err)
        }

        if matched {
            debug.Log("path %q excluded by a filter", item)
            return false
        }

        if !opts.ExcludeOtherFS || fi == nil {
            return true
        }

        id, err := fs.DeviceID(fi)
        if err != nil {
            // This should never happen because gatherDevices() would have
            // errored out earlier. If it still does that's a reason to panic.
            panic(err)
        }

        _, found := allowedDevs[id]
        if !found {
            debug.Log("path %q on disallowed device %d", item, id)
            return false
        }

        return true
    }

    stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
    if err != nil {
        return err
    }

    arch := archiver.New(repo)
    arch.Excludes = opts.Excludes
    arch.SelectFilter = selectFilter

    arch.Error = func(dir string, fi os.FileInfo, err error) error {
        // TODO: make ignoring errors configurable
        Warnf("%s\rerror for %s: %v\n", ClearLine(), dir, err)
        return nil
    }

    _, id, err := arch.Snapshot(newArchiveProgress(gopts, stat), target, opts.Tags, parentSnapshotID)
    if err != nil {
        return err
    }

    Verbosef("snapshot %s saved\n", id.Str())

    return nil
}
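// readLinesFromFile is called above but not part of this excerpt. A minimal
// sketch of the expected behavior: an empty filename means no --files-from
// was given, otherwise the non-empty lines of the file become backup targets.
func readLinesFromFile(filename string) ([]string, error) {
    if filename == "" {
        return nil, nil
    }

    f, err := fs.Open(filename)
    if err != nil {
        return nil, err
    }
    defer f.Close()

    var lines []string
    sc := bufio.NewScanner(f)
    for sc.Scan() {
        if line := sc.Text(); line != "" {
            lines = append(lines, line)
        }
    }
    return lines, sc.Err()
}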
func testArchiverDuplication(t *testing.T) {
    _, err := io.ReadFull(rand.Reader, DupID[:])
    if err != nil {
        t.Fatal(err)
    }

    repo := repository.New(forgetfulBackend())

    err = repo.Init("foo")
    if err != nil {
        t.Fatal(err)
    }

    arch := archiver.New(repo)

    wg := &sync.WaitGroup{}
    done := make(chan struct{})
    for i := 0; i < parallelSaves; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                select {
                case <-done:
                    return
                default:
                }

                id := randomID()

                if repo.Index().Has(id, restic.DataBlob) {
                    continue
                }

                buf := make([]byte, 50)

                err := arch.Save(restic.DataBlob, buf, id)
                if err != nil {
                    // t.Fatal must not be called from a non-test goroutine,
                    // so record the failure and stop this worker instead
                    t.Error(err)
                    return
                }
            }
        }()
    }

    saveIndex := func() {
        defer wg.Done()

        ticker := time.NewTicker(testSaveIndexTime)
        defer ticker.Stop()

        for {
            select {
            case <-done:
                return
            case <-ticker.C:
                err := repo.SaveFullIndex()
                if err != nil {
                    // same rule as above: no t.Fatal outside the test goroutine
                    t.Error(err)
                    return
                }
            }
        }
    }

    wg.Add(1)
    go saveIndex()

    <-time.After(testTimeout)
    close(done)

    wg.Wait()
}
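// randomID is used above but not defined in this excerpt. A plausible sketch,
// assuming restic.ID is a fixed-size hash array: fill it from crypto/rand,
// mirroring how DupID is initialized at the top of the test.
func randomID() restic.ID {
    var id restic.ID
    if _, err := io.ReadFull(rand.Reader, id[:]); err != nil {
        panic(err)
    }
    return id
}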