func TestDelayedWalkTree(t *testing.T) {
    repodir, cleanup := Env(t, repoFixture)
    defer cleanup()

    repo := repository.TestOpenLocal(t, repodir)
    OK(t, repo.LoadIndex())

    root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
    OK(t, err)

    dr := delayRepo{repo, 100 * time.Millisecond}

    // start tree walker
    treeJobs := make(chan walk.TreeJob)
    go walk.Tree(dr, root, nil, treeJobs)

    i := 0
    for job := range treeJobs {
        expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...)
        if job.Path != expectedPath {
            t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path)
        }
        i++
    }

    if i != len(walktreeTestItems) {
        t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems))
    }
}
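delayRepo is defined elsewhere in the walk tests. A minimal sketch of the shape it is assumed to have: a wrapper that sleeps before each tree load, so the test exercises the walker while results arrive slowly.

// Sketch only: the real delayRepo lives elsewhere in this package; this
// assumed version delays LoadTree by a fixed duration.
type delayRepo struct {
    repo  restic.Repository
    delay time.Duration
}

func (d delayRepo) LoadTree(id restic.ID) (*restic.Tree, error) {
    time.Sleep(d.delay)
    return d.repo.LoadTree(id)
}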
// ParallelWorkFuncParseID converts a function that takes a restic.ID to a
// function that takes a string. Filenames that do not parse as a restic.ID
// result in an error.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
    return func(s string, done <-chan struct{}) error {
        id, err := restic.ParseID(s)
        if err != nil {
            // log the original string, id is the zero value on parse failure
            debug.Log("invalid ID %q: %v", s, err)
            return err
        }

        return f(id, done)
    }
}
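For context, a usage sketch (listSnapshots and its worker are hypothetical): the adapter turns an ID-based worker into the string-based ParallelWorkFunc that FilesInParallel expects, matching how LoadIndex uses it later in this section.

// listSnapshots is a hypothetical example of plugging an ID worker into
// the string-based file listing machinery.
func listSnapshots(repo restic.Repository) error {
    worker := func(id restic.ID, done <-chan struct{}) error {
        fmt.Printf("snapshot %v\n", id.Str())
        return nil
    }

    return repository.FilesInParallel(repo.Backend(), restic.SnapshotFile,
        defaultParallelism, repository.ParallelWorkFuncParseID(worker))
}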
// parseIDsFromReader reads one ID per line from rd; lines that do not parse
// as a restic.ID are logged and skipped.
func parseIDsFromReader(t testing.TB, rd io.Reader) restic.IDs {
    IDs := restic.IDs{}
    sc := bufio.NewScanner(rd)

    for sc.Scan() {
        id, err := restic.ParseID(sc.Text())
        if err != nil {
            t.Logf("parse id %v: %v", sc.Text(), err)
            continue
        }

        IDs = append(IDs, id)
    }

    return IDs
}
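A quick usage sketch for the helper above (the test itself is hypothetical): malformed lines are logged and skipped rather than failing the test, so only valid IDs come back.

// TestParseIDsFromReader is a hypothetical example; the first line is a
// valid ID, the second is not and gets skipped.
func TestParseIDsFromReader(t *testing.T) {
    input := "937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da\nnot-a-valid-id\n"

    ids := parseIDsFromReader(t, strings.NewReader(input))
    if len(ids) != 1 {
        t.Fatalf("expected 1 valid ID, got %d", len(ids))
    }
}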
// loadSnapshotTreeIDs loads all snapshots from the backend and returns their
// tree IDs. Errors from individual snapshots are collected and returned
// alongside the IDs.
func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
    var trees struct {
        IDs restic.IDs
        sync.Mutex
    }

    var errs struct {
        errs []error
        sync.Mutex
    }

    snapshotWorker := func(strID string, done <-chan struct{}) error {
        id, err := restic.ParseID(strID)
        if err != nil {
            return err
        }

        debug.Log("load snapshot %v", id.Str())

        treeID, err := loadTreeFromSnapshot(repo, id)
        if err != nil {
            errs.Lock()
            errs.errs = append(errs.errs, err)
            errs.Unlock()
            return nil
        }

        debug.Log("snapshot %v has tree %v", id.Str(), treeID.Str())

        trees.Lock()
        trees.IDs = append(trees.IDs, treeID)
        trees.Unlock()

        return nil
    }

    err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
    if err != nil {
        errs.errs = append(errs.errs, err)
    }

    return trees.IDs, errs.errs
}
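The anonymous structs above embed sync.Mutex next to the data it guards, so Lock and Unlock are promoted methods on the struct itself. A self-contained sketch of the same pattern:

// the embedded mutex guards the slice declared beside it; Lock/Unlock
// are promoted onto the struct
var results struct {
    items []int
    sync.Mutex
}

var wg sync.WaitGroup
for i := 0; i < 4; i++ {
    wg.Add(1)
    go func(n int) {
        defer wg.Done()
        results.Lock()
        results.items = append(results.items, n) // guarded append
        results.Unlock()
    }(i)
}
wg.Wait()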
func BenchmarkDelayedWalkTree(t *testing.B) {
    repodir, cleanup := Env(t, repoFixture)
    defer cleanup()

    repo := repository.TestOpenLocal(t, repodir)
    OK(t, repo.LoadIndex())

    root, err := restic.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
    OK(t, err)

    dr := delayRepo{repo, 10 * time.Millisecond}

    t.ResetTimer()

    for i := 0; i < t.N; i++ {
        // start tree walker
        treeJobs := make(chan walk.TreeJob)
        go walk.Tree(dr, root, nil, treeJobs)

        // drain the channel
        for range treeJobs {
        }
    }
}
func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) {
    defer close(out)
    in := r.be.List(t, done)

    var (
        // disable sending on outCh until we have received an ID
        outCh chan<- restic.ID
        // enable receiving from in
        inCh = in
        id   restic.ID
        err  error
    )

    for {
        select {
        case <-done:
            return
        case strID, ok := <-inCh:
            if !ok {
                // input channel closed, we're done
                return
            }

            id, err = restic.ParseID(strID)
            if err != nil {
                // ignore invalid IDs
                continue
            }

            // pause receiving until the current ID has been sent
            inCh = nil
            outCh = out
        case outCh <- id:
            outCh = nil
            inCh = in
        }
    }
}
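list relies on the nil-channel idiom: sending on or receiving from a nil channel blocks forever, so nil-ing out inCh or outCh disables that select case until the other side has made progress. A standalone, runnable sketch of the idiom:

package main

import "fmt"

// forward copies values from in to out one at a time, toggling the
// active select case by nil-ing out the idle channel.
func forward(in <-chan int, out chan<- int) {
    defer close(out)

    inCh := in
    var outCh chan<- int
    var v int

    for {
        select {
        case x, ok := <-inCh:
            if !ok {
                return
            }
            v = x
            inCh = nil // stop receiving until v has been sent
            outCh = out
        case outCh <- v:
            outCh = nil // sent; resume receiving
            inCh = in
        }
    }
}

func main() {
    in := make(chan int)
    out := make(chan int)
    go forward(in, out)

    go func() {
        for i := 1; i <= 3; i++ {
            in <- i
        }
        close(in)
    }()

    for v := range out {
        fmt.Println(v)
    }
}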
// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
    b := open(t)
    defer close(t)

    for _, tpe := range []restic.FileType{
        restic.DataFile, restic.KeyFile, restic.LockFile,
        restic.SnapshotFile, restic.IndexFile,
    } {
        // detect non-existing files
        for _, ts := range testStrings {
            id, err := restic.ParseID(ts.id)
            test.OK(t, err)

            // test if blob is already in repository
            ret, err := b.Test(tpe, id.String())
            test.OK(t, err)
            test.Assert(t, !ret, "blob was found to exist before creating")

            // try to stat a not existing blob
            h := restic.Handle{Type: tpe, Name: id.String()}
            _, err = b.Stat(h)
            test.Assert(t, err != nil, "blob data could be extracted before creation")

            // try to read not existing blob
            _, err = b.Load(h, nil, 0)
            test.Assert(t, err != nil, "blob reader could be obtained before creation")

            // try to get string out, should fail
            ret, err = b.Test(tpe, id.String())
            test.OK(t, err)
            test.Assert(t, !ret, "id %q was found (but should not have)", ts.id)
        }

        // add files
        for _, ts := range testStrings {
            store(t, b, tpe, []byte(ts.data))

            // test Load()
            h := restic.Handle{Type: tpe, Name: ts.id}
            buf, err := backend.LoadAll(b, h, nil)
            test.OK(t, err)
            test.Equals(t, ts.data, string(buf))

            // try to read it out with an offset and a length
            start := 1
            end := len(ts.data) - 2
            length := end - start

            buf2 := make([]byte, length)
            n, err := b.Load(h, buf2, int64(start))
            test.OK(t, err)
            test.Equals(t, length, n)
            test.Equals(t, ts.data[start:end], string(buf2))
        }

        // test adding the first file again
        ts := testStrings[0]

        // saving the same blob again must return an error
        err := b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
        test.Assert(t, err != nil, "expected error, got %v", err)

        // remove and recreate
        err = b.Remove(tpe, ts.id)
        test.OK(t, err)

        // test that the blob is gone
        ok, err := b.Test(tpe, ts.id)
        test.OK(t, err)
        test.Assert(t, !ok, "removed blob still present")

        // create blob
        err = b.Save(restic.Handle{Type: tpe, Name: ts.id}, []byte(ts.data))
        test.OK(t, err)

        // list items
        IDs := restic.IDs{}

        for _, ts := range testStrings {
            id, err := restic.ParseID(ts.id)
            test.OK(t, err)
            IDs = append(IDs, id)
        }

        list := restic.IDs{}
        for s := range b.List(tpe, nil) {
            list = append(list, restic.TestParseID(s))
        }

        if len(IDs) != len(list) {
            t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
        }

        sort.Sort(IDs)
        sort.Sort(list)

        if !reflect.DeepEqual(IDs, list) {
            t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
        }

        // remove content if requested
        if test.TestCleanupTempDirs {
            for _, ts := range testStrings {
                id, err := restic.ParseID(ts.id)
                test.OK(t, err)

                found, err := b.Test(tpe, id.String())
                test.OK(t, err)

                test.OK(t, b.Remove(tpe, id.String()))

                found, err = b.Test(tpe, id.String())
                test.OK(t, err)
                test.Assert(t, !found, fmt.Sprintf("id %q found after removal", id))
            }
        }
    }
}
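In short, the round trip TestBackend verifies for each file type looks like this. A sketch against the backend interface used above (roundTrip itself is hypothetical; b is any restic.Backend implementation):

// roundTrip sketches the create/read/remove cycle exercised by TestBackend.
func roundTrip(t testing.TB, b restic.Backend, id restic.ID, data []byte) {
    h := restic.Handle{Type: restic.SnapshotFile, Name: id.String()}

    test.OK(t, b.Save(h, data)) // create

    buf, err := backend.LoadAll(b, h, nil) // read back in full
    test.OK(t, err)
    test.Equals(t, string(data), string(buf))

    test.OK(t, b.Remove(restic.SnapshotFile, id.String())) // delete again
}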
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
    debug.Log("Start")

    type indexRes struct {
        Index *repository.Index
        ID    string
    }

    indexCh := make(chan indexRes)

    worker := func(id restic.ID, done <-chan struct{}) error {
        debug.Log("worker got index %v", id)
        idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
        if errors.Cause(err) == repository.ErrOldIndexFormat {
            debug.Log("index %v has old format", id.Str())
            hints = append(hints, ErrOldIndexFormat{id})

            idx, err = repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeOldIndex)
        }

        if err != nil {
            return err
        }

        select {
        case indexCh <- indexRes{Index: idx, ID: id.String()}:
        case <-done:
        }

        return nil
    }

    var perr error
    go func() {
        defer close(indexCh)

        debug.Log("start loading indexes in parallel")
        perr = repository.FilesInParallel(c.repo.Backend(), restic.IndexFile, defaultParallelism,
            repository.ParallelWorkFuncParseID(worker))
        debug.Log("loading indexes finished, error: %v", perr)
    }()

    done := make(chan struct{})
    defer close(done)

    packToIndex := make(map[restic.ID]restic.IDSet)

    for res := range indexCh {
        debug.Log("process index %v", res.ID)
        idxID, err := restic.ParseID(res.ID)
        if err != nil {
            errs = append(errs, errors.Errorf("unable to parse as index ID: %v", res.ID))
            continue
        }

        c.indexes[idxID] = res.Index
        c.masterIndex.Insert(res.Index)

        debug.Log("process blobs")
        cnt := 0
        for blob := range res.Index.Each(done) {
            c.packs.Insert(blob.PackID)
            c.blobs.Insert(blob.ID)
            c.blobRefs.M[blob.ID] = 0
            cnt++

            if _, ok := packToIndex[blob.PackID]; !ok {
                packToIndex[blob.PackID] = restic.NewIDSet()
            }
            packToIndex[blob.PackID].Insert(idxID)
        }

        debug.Log("%d blobs processed", cnt)
    }

    // indexCh is closed, so the worker goroutine has finished and perr is
    // safe to read; checking it before the loop would race with the goroutine
    // and could never observe a non-nil value reliably.
    debug.Log("done, error %v", perr)
    if perr != nil {
        errs = append(errs, perr)
        return hints, errs
    }

    debug.Log("checking for duplicate packs")
    for packID := range c.packs {
        debug.Log("  check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
        if len(packToIndex[packID]) > 1 {
            hints = append(hints, ErrDuplicatePacks{
                PackID:  packID,
                Indexes: packToIndex[packID],
            })
        }
    }

    c.repo.SetIndex(c.masterIndex)

    return hints, errs
}
func runCat(gopts GlobalOptions, args []string) error {
    if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
        return errors.Fatalf("type or ID not specified")
    }

    repo, err := OpenRepository(gopts)
    if err != nil {
        return err
    }

    lock, err := lockRepo(repo)
    defer unlockRepo(lock)
    if err != nil {
        return err
    }

    tpe := args[0]

    var id restic.ID
    if tpe != "masterkey" && tpe != "config" {
        id, err = restic.ParseID(args[1])
        if err != nil {
            if tpe != "snapshot" {
                return errors.Fatalf("unable to parse ID: %v\n", err)
            }

            // find snapshot id with prefix
            id, err = restic.FindSnapshot(repo, args[1])
            if err != nil {
                return err
            }
        }
    }

    // handle all types that don't need an index
    switch tpe {
    case "config":
        buf, err := json.MarshalIndent(repo.Config(), "", "  ")
        if err != nil {
            return err
        }

        fmt.Println(string(buf))
        return nil
    case "index":
        buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
        if err != nil {
            return err
        }

        _, err = os.Stdout.Write(append(buf, '\n'))
        return err
    case "snapshot":
        sn := &restic.Snapshot{}
        err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
        if err != nil {
            return err
        }

        buf, err := json.MarshalIndent(&sn, "", "  ")
        if err != nil {
            return err
        }

        fmt.Println(string(buf))
        return nil
    case "key":
        h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
        buf, err := backend.LoadAll(repo.Backend(), h, nil)
        if err != nil {
            return err
        }

        key := &repository.Key{}
        err = json.Unmarshal(buf, key)
        if err != nil {
            return err
        }

        buf, err = json.MarshalIndent(&key, "", "  ")
        if err != nil {
            return err
        }

        fmt.Println(string(buf))
        return nil
    case "masterkey":
        buf, err := json.MarshalIndent(repo.Key(), "", "  ")
        if err != nil {
            return err
        }

        fmt.Println(string(buf))
        return nil
    case "lock":
        lock, err := restic.LoadLock(repo, id)
        if err != nil {
            return err
        }

        buf, err := json.MarshalIndent(&lock, "", "  ")
        if err != nil {
            return err
        }

        fmt.Println(string(buf))
        return nil
    }

    // load index, handle all the other types
    err = repo.LoadIndex()
    if err != nil {
        return err
    }

    switch tpe {
    case "pack":
        h := restic.Handle{Type: restic.DataFile, Name: id.String()}
        buf, err := backend.LoadAll(repo.Backend(), h, nil)
        if err != nil {
            return err
        }

        hash := restic.Hash(buf)
        if !hash.Equal(id) {
            fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n  %v\ngot:\n  %v\n", id.String(), hash.String())
        }

        _, err = os.Stdout.Write(buf)
        return err
    case "blob":
        for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
            list, err := repo.Index().Lookup(id, t)
            if err != nil {
                continue
            }
            blob := list[0]

            buf := make([]byte, blob.Length)
            // load the blob with the type it was found under, not
            // unconditionally as a data blob
            n, err := repo.LoadBlob(t, id, buf)
            if err != nil {
                return err
            }
            buf = buf[:n]

            _, err = os.Stdout.Write(buf)
            return err
        }

        return errors.Fatal("blob not found")
    case "tree":
        debug.Log("cat tree %v", id.Str())
        tree, err := repo.LoadTree(id)
        if err != nil {
            debug.Log("unable to load tree %v: %v", id.Str(), err)
            return err
        }

        buf, err := json.MarshalIndent(&tree, "", "  ")
        if err != nil {
            debug.Log("error json.MarshalIndent(): %v", err)
            return err
        }

        _, err = os.Stdout.Write(append(buf, '\n'))
        return err
    default:
        return errors.Fatal("invalid type")
    }
}