func TestDelayedWalkTree(t *testing.T) {
	WithTestEnvironment(t, repoFixture, func(repodir string) {
		repo := OpenLocalRepo(t, repodir)
		OK(t, repo.LoadIndex())

		root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
		OK(t, err)

		dr := delayRepo{repo, 100 * time.Millisecond}

		// start tree walker
		treeJobs := make(chan restic.WalkTreeJob)
		go restic.WalkTree(dr, root, nil, treeJobs)

		i := 0
		for job := range treeJobs {
			expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...)
			if job.Path != expectedPath {
				t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path)
			}
			i++
		}

		if i != len(walktreeTestItems) {
			t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems))
		}
	})
}
// ParseID parses s as a backend.ID and panics if that fails.
func ParseID(s string) backend.ID {
	id, err := backend.ParseID(s)
	if err != nil {
		panic(err)
	}

	return id
}
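// Minimal usage sketch for the helper above: in table-driven tests it
// collapses parse-and-check boilerplate into one call. The test name and
// the 64-character hex string are invented; any valid ID works.
func TestParseIDRoundTrip(t *testing.T) {
	const hex = "0101010101010101010101010101010101010101010101010101010101010101"
	id := ParseID(hex)
	Equals(t, hex, id.String())
}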
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) {
	// find snapshot id with prefix
	name, err := backend.Find(repo.Backend(), backend.Snapshot, s)
	if err != nil {
		return backend.ID{}, err
	}

	return backend.ParseID(name)
}
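// Hedged usage sketch for FindSnapshot: resolve an abbreviated snapshot ID
// typed by a user, as the cat command below does for the "snapshot" type.
// The function name printSnapshotID is invented for illustration.
func printSnapshotID(repo *repository.Repository, prefix string) error {
	id, err := restic.FindSnapshot(repo, prefix)
	if err != nil {
		return fmt.Errorf("unable to resolve snapshot %q: %v", prefix, err)
	}

	fmt.Printf("using snapshot %v\n", id.Str())
	return nil
}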
// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
// function that takes a string.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
	return func(s string, done <-chan struct{}) error {
		id, err := backend.ParseID(s)
		if err != nil {
			return err
		}

		return f(id, done)
	}
}
// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
// function that takes a string. Filenames that do not parse as a backend.ID
// are ignored.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
	return func(s string, done <-chan struct{}) error {
		id, err := backend.ParseID(s)
		if err != nil {
			// log the raw string: id is the zero value when parsing fails
			debug.Log("repository.ParallelWorkFuncParseID", "invalid ID %q: %v", s, err)
			return nil
		}

		return f(id, done)
	}
}
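// Sketch of how the wrapper above is driven, mirroring the checker code
// further down: FilesInParallel hands each worker a raw file name, and
// ParallelWorkFuncParseID adapts an ID-based worker to that interface.
// The function name and parallelism constant here are invented.
func logAllSnapshotIDs(repo *repository.Repository) error {
	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("example.worker", "snapshot file %v", id.Str())
		return nil
	}

	const parallelism = 40
	return repository.FilesInParallel(repo.Backend(), backend.Snapshot, parallelism,
		repository.ParallelWorkFuncParseID(worker))
}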
func TestID(t *testing.T) {
	for _, test := range TestStrings {
		id, err := backend.ParseID(test.id)
		OK(t, err)

		id2, err := backend.ParseID(test.id)
		OK(t, err)
		Assert(t, id.Equal(id2), "ID.Equal() does not work as expected")

		ret, err := id.EqualString(test.id)
		OK(t, err)
		Assert(t, ret, "ID.EqualString() returned wrong value")

		// test json marshalling
		buf, err := id.MarshalJSON()
		OK(t, err)
		Equals(t, "\""+test.id+"\"", string(buf))

		var id3 backend.ID
		err = id3.UnmarshalJSON(buf)
		OK(t, err)
		Equals(t, id, id3)
	}
}
func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
	IDs := backend.IDs{}
	sc := bufio.NewScanner(rd)

	for sc.Scan() {
		id, err := backend.ParseID(sc.Text())
		if err != nil {
			t.Logf("parse id %v: %v", sc.Text(), err)
			continue
		}

		IDs = append(IDs, id)
	}

	return IDs
}
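// Usage sketch for parseIDsFromReader: feed it newline-separated IDs, e.g.
// captured CLI output. The test name and input are invented; the second
// input line deliberately fails to parse and is skipped with a log message.
func TestParseIDsSkipsInvalid(t *testing.T) {
	input := "0101010101010101010101010101010101010101010101010101010101010101\nnot-an-id\n"
	ids := parseIDsFromReader(t, strings.NewReader(input))
	Equals(t, 1, len(ids))
}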
func (c *Cache) list(t backend.Type) ([]cacheEntry, error) {
	var dir string
	switch t {
	case backend.Snapshot:
		dir = filepath.Join(c.base, "snapshots")
	default:
		return nil, fmt.Errorf("cache not supported for type %v", t)
	}

	fd, err := os.Open(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return []cacheEntry{}, nil
		}
		return nil, err
	}
	defer fd.Close()

	fis, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}

	entries := make([]cacheEntry, 0, len(fis))
	for _, fi := range fis {
		parts := strings.SplitN(fi.Name(), ".", 2)

		id, err := backend.ParseID(parts[0])
		// ignore invalid cache entries for now
		if err != nil {
			debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err)
			continue
		}

		e := cacheEntry{ID: id}
		if len(parts) == 2 {
			e.Subtype = parts[1]
		}

		entries = append(entries, e)
	}

	return entries, nil
}
// loadSnapshotTreeIDs loads all snapshots from the backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
	var trees struct {
		IDs backend.IDs
		sync.Mutex
	}

	var errs struct {
		errs []error
		sync.Mutex
	}

	snapshotWorker := func(strID string, done <-chan struct{}) error {
		id, err := backend.ParseID(strID)
		if err != nil {
			return err
		}

		debug.Log("Checker.Snapshots", "load snapshot %v", id.Str())

		treeID, err := loadTreeFromSnapshot(repo, id)
		if err != nil {
			errs.Lock()
			errs.errs = append(errs.errs, err)
			errs.Unlock()
			return nil
		}

		debug.Log("Checker.Snapshots", "snapshot %v has tree %v", id.Str(), treeID.Str())
		trees.Lock()
		trees.IDs = append(trees.IDs, treeID)
		trees.Unlock()

		return nil
	}

	err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
	if err != nil {
		errs.errs = append(errs.errs, err)
	}

	return trees.IDs, errs.errs
}
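// The mutex-embedding pattern above is worth isolating: the anonymous struct
// bundles a slice with the lock that guards it, so parallel workers can only
// append under the lock. A standalone sketch (the function and names are
// invented; only the pattern is taken from the code above):
func collectIDs(workers int, produce func() backend.ID) backend.IDs {
	var results struct {
		IDs backend.IDs
		sync.Mutex
	}

	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			id := produce()
			results.Lock()
			results.IDs = append(results.IDs, id)
			results.Unlock()
		}()
	}
	wg.Wait()

	return results.IDs
}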
func BenchmarkDelayedWalkTree(t *testing.B) {
	WithTestEnvironment(t, repoFixture, func(repodir string) {
		repo := OpenLocalRepo(t, repodir)
		OK(t, repo.LoadIndex())

		root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
		OK(t, err)

		dr := delayRepo{repo, 10 * time.Millisecond}

		t.ResetTimer()

		for i := 0; i < t.N; i++ {
			// start tree walker and drain the result channel
			treeJobs := make(chan restic.WalkTreeJob)
			go restic.WalkTree(dr, root, nil, treeJobs)

			for range treeJobs {
			}
		}
	})
}
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo *Repository, id string, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])

	idxID, err := backend.ParseID(id)
	if err != nil {
		return nil, err
	}

	buf, err := repo.LoadAndDecrypt(backend.Index, idxID)
	if err != nil {
		return nil, err
	}

	idx, err = fn(bytes.NewReader(buf))
	if err != nil {
		debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = idxID
	return idx, nil
}
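// Sketch of the fallback this function enables, matching the checker usage
// below: try the current index format first, then retry with the legacy
// decoder when ErrOldIndexFormat is returned. loadAnyIndex is an invented
// name for illustration.
func loadAnyIndex(repo *repository.Repository, id string) (*repository.Index, error) {
	idx, err := repository.LoadIndexWithDecoder(repo, id, repository.DecodeIndex)
	if err == repository.ErrOldIndexFormat {
		idx, err = repository.LoadIndexWithDecoder(repo, id, repository.DecodeOldIndex)
	}

	return idx, err
}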
func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
	defer close(out)
	in := r.be.List(t, done)

	var (
		// disable sending on outCh until we have received an ID
		outCh chan<- backend.ID
		// enable receiving from in
		inCh = in
		id   backend.ID
		err  error
	)

	for {
		select {
		case <-done:
			return
		case strID, ok := <-inCh:
			if !ok {
				// input channel closed, we're done
				return
			}

			id, err = backend.ParseID(strID)
			if err != nil {
				// ignore invalid IDs
				continue
			}

			inCh = nil
			outCh = out
		case outCh <- id:
			outCh = nil
			inCh = in
		}
	}
}
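// The pump above relies on the nil-channel idiom: send and receive on a nil
// channel block forever, so assigning nil to inCh or outCh switches that
// select case off until the other side has made progress. An in-package
// consumer could look like this (the method name is invented):
func (r *Repository) printSnapshotIDs() {
	done := make(chan struct{})
	defer close(done)

	out := make(chan backend.ID)
	go r.list(backend.Snapshot, done, out)

	for id := range out {
		fmt.Println(id.Str())
	}
}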
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
	debug.Log("LoadIndex", "Start")

	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeIndex)
		if err == repository.ErrOldIndexFormat {
			debug.Log("LoadIndex", "index %v has old format", id.Str())
			hints = append(hints, ErrOldIndexFormat{id})

			idx, err = repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeOldIndex)
		}

		if err != nil {
			return err
		}

		select {
		case indexCh <- indexRes{Index: idx, ID: id.String()}:
		case <-done:
		}

		return nil
	}

	var perr error
	go func() {
		defer close(indexCh)
		debug.Log("LoadIndex", "start loading indexes in parallel")
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism,
			repository.ParallelWorkFuncParseID(worker))
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	packToIndex := make(map[backend.ID]backend.IDSet)

	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)
		idxID, err := backend.ParseID(res.ID)
		if err != nil {
			errs = append(errs, fmt.Errorf("unable to parse as index ID: %v", res.ID))
			continue
		}

		c.indexes[idxID] = res.Index
		c.masterIndex.Insert(res.Index)

		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs.Insert(blob.PackID)
			c.blobs.Insert(blob.ID)
			c.blobRefs.M[blob.ID] = 0
			cnt++

			if _, ok := packToIndex[blob.PackID]; !ok {
				packToIndex[blob.PackID] = backend.NewIDSet()
			}
			packToIndex[blob.PackID].Insert(idxID)
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	// indexCh is closed, so FilesInParallel has returned and perr is set;
	// checking it any earlier would race with the goroutine above.
	if perr != nil {
		errs = append(errs, perr)
		return hints, errs
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	debug.Log("LoadIndex", "checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("LoadIndex", " check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}

	c.repo.SetIndex(c.masterIndex)

	return hints, errs
}
func (cmd CmdCat) Execute(args []string) error {
	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
		return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tpe := args[0]

	var id backend.ID
	if tpe != "masterkey" && tpe != "config" {
		id, err = backend.ParseID(args[1])
		if err != nil {
			if tpe != "snapshot" {
				return err
			}

			// find snapshot id with prefix
			id, err = restic.FindSnapshot(repo, args[1])
			if err != nil {
				return err
			}
		}
	}

	// handle all types that don't need an index
	switch tpe {
	case "config":
		buf, err := json.MarshalIndent(repo.Config, "", " ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "index":
		buf, err := repo.LoadAndDecrypt(backend.Index, id)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err
	case "snapshot":
		sn := &restic.Snapshot{}
		err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&sn, "", " ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "key":
		h := backend.Handle{Type: backend.Key, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		key := &repository.Key{}
		err = json.Unmarshal(buf, key)
		if err != nil {
			return err
		}

		buf, err = json.MarshalIndent(&key, "", " ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "masterkey":
		buf, err := json.MarshalIndent(repo.Key(), "", " ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "lock":
		lock, err := restic.LoadLock(repo, id)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&lock, "", " ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	}

	// load index, handle all the other types
	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	switch tpe {
	case "pack":
		h := backend.Handle{Type: backend.Data, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(buf)
		return err
	case "blob":
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		buf := make([]byte, blob.Length)
		data, err := repo.LoadBlob(blob.Type, id, buf)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(data)
		return err
	case "tree":
		debug.Log("cat", "cat tree %v", id.Str())
		tree := restic.NewTree()
		err = repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
			return err
		}

		buf, err := json.MarshalIndent(&tree, "", " ")
		if err != nil {
			debug.Log("cat", "error json.MarshalIndent(): %v", err)
			return err
		}

		// return the write error instead of discarding it
		_, err = os.Stdout.Write(append(buf, '\n'))
		return err
	default:
		return errors.New("invalid type")
	}
}
// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
	b := open(t)
	defer close(t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a not existing blob
			h := backend.Handle{Type: tpe, Name: id.String()}
			_, err = b.Stat(h)
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.Load(h, nil, 0)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range testStrings {
			store(t, b, tpe, []byte(test.data))

			// test Load()
			h := backend.Handle{Type: tpe, Name: test.id}
			buf, err := backend.LoadAll(b, h, nil)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			n, err := b.Load(h, buf2, int64(start))
			OK(t, err)
			Equals(t, length, n)
			Equals(t, test.data[start:end], string(buf2))
		}

		// test adding the first file again
		test := testStrings[0]

		// create blob
		err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		OK(t, err)

		// list items
		IDs := backend.IDs{}

		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		list := backend.IDs{}
		for s := range b.List(tpe, nil) {
			list = append(list, ParseID(s))
		}

		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
		}

		// remove content if requested
		if TestCleanupTempDirs {
			for _, test := range testStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q found after removal", id))
			}
		}
	}
}