Example #1
// selectBlobs splits the list of all blobs randomly into two lists. A blob
// will be contained in the first list with probability p.
func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 restic.BlobSet) {
	done := make(chan struct{})
	defer close(done)

	list1 = restic.NewBlobSet()
	list2 = restic.NewBlobSet()

	blobs := restic.NewBlobSet()

	for id := range repo.List(restic.DataFile, done) {
		entries, _, err := repo.ListPack(id)
		if err != nil {
			t.Fatalf("error listing pack %v: %v", id, err)
		}

		for _, entry := range entries {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			if blobs.Has(h) {
				t.Errorf("ignoring duplicate blob %v", h)
				continue
			}
			blobs.Insert(h)

			if rand.Float32() <= p {
				list1.Insert(h)
			} else {
				list2.Insert(h)
			}

		}
	}

	return list1, list2
}
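A minimal usage sketch, assuming the repository.TestRepository test helper and the createRandomBlobs helper shown in a later example are available in the same package; the test name and blob counts are illustrative only.

func TestSelectBlobsDisjoint(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// fill the repository with some random blobs, flushed into packs
	createRandomBlobs(t, repo, 100, 0.7)

	list1, list2 := selectBlobs(t, repo, 0.5)

	// the two lists must be disjoint
	for h := range list1 {
		if list2.Has(h) {
			t.Errorf("blob %v is contained in both lists", h)
		}
	}
}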
Example #2
func validateIndex(t testing.TB, repo restic.Repository, idx *Index) {
	for id := range repo.List(restic.DataFile, nil) {
		if _, ok := idx.Packs[id]; !ok {
			t.Errorf("pack %v missing from index", id.Str())
		}
	}
}
Example #3
func packIDTester(repo restic.Repository, inChan <-chan restic.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
	debug.Log("worker start")
	defer debug.Log("worker done")

	defer wg.Done()

	for id := range inChan {
		ok, err := repo.Backend().Test(restic.DataFile, id.String())
		if err != nil {
			err = PackError{ID: id, Err: err}
		} else if !ok {
			err = PackError{ID: id, Err: errors.New("does not exist")}
		}

		if err != nil {
			debug.Log("error checking for pack %s: %v", id.Str(), err)
			select {
			case <-done:
				return
			case errChan <- err:
			}

			continue
		}

		debug.Log("pack %s exists", id.Str())
	}
}
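A hedged sketch of how a pool of packIDTester workers could be wired up. The worker count and the testAllPacks name are assumptions here; only the channel signature follows the function above.

func testAllPacks(repo restic.Repository, ids restic.IDs, errChan chan<- error, done <-chan struct{}) {
	inChan := make(chan restic.ID)
	var wg sync.WaitGroup

	const workers = 4 // assumed; pick whatever degree of parallelism fits
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go packIDTester(repo, inChan, errChan, &wg, done)
	}

	// feed all pack IDs to the workers, then signal that no more will come
	for _, id := range ids {
		inChan <- id
	}
	close(inChan)

	wg.Wait()
}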
Example #4
func newDirFromSnapshot(repo restic.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) {
	debug.Log("new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
	tree, err := repo.LoadTree(*snapshot.Tree)
	if err != nil {
		debug.Log("  loadTree(%v) failed: %v", snapshot.ID.Str(), err)
		return nil, err
	}
	items := make(map[string]*restic.Node)
	for _, n := range tree.Nodes {
		nodes, err := replaceSpecialNodes(repo, n)
		if err != nil {
			debug.Log("  replaceSpecialNodes(%v) failed: %v", n, err)
			return nil, err
		}

		for _, node := range nodes {
			items[node.Name] = node
		}
	}

	return &dir{
		repo: repo,
		node: &restic.Node{
			UID:        uint32(os.Getuid()),
			GID:        uint32(os.Getgid()),
			AccessTime: snapshot.Time,
			ModTime:    snapshot.Time,
			ChangeTime: snapshot.Time,
			Mode:       os.ModeDir | 0555,
		},
		items:       items,
		inode:       inodeFromBackendID(snapshot.ID),
		ownerIsRoot: ownerIsRoot,
	}, nil
}
Example #5
// Save writes a new index containing the given packs.
func Save(repo restic.Repository, packs map[restic.ID][]restic.Blob, supersedes restic.IDs) (restic.ID, error) {
	idx := &indexJSON{
		Supersedes: supersedes,
		Packs:      make([]*packJSON, 0, len(packs)),
	}

	for packID, blobs := range packs {
		b := make([]blobJSON, 0, len(blobs))
		for _, blob := range blobs {
			b = append(b, blobJSON{
				ID:     blob.ID,
				Type:   blob.Type,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		p := &packJSON{
			ID:    packID,
			Blobs: b,
		}

		idx.Packs = append(idx.Packs, p)
	}

	return repo.SaveJSONUnpacked(restic.IndexFile, idx)
}
Example #6
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
	for range repo.Backend().List(t, nil) {
		n++
	}

	return n
}
Example #7
func loadBlob(t *testing.T, repo restic.Repository, id restic.ID, buf []byte) int {
	n, err := repo.LoadBlob(restic.DataBlob, id, buf)
	if err != nil {
		t.Fatalf("LoadBlob(%v) returned error %v", id, err)
	}

	return n
}
Example #8
func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return restic.ID{}, err
	}

	newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname}
	return repo.SaveJSONUnpacked(restic.LockFile, newLock)
}
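A minimal round-trip sketch, assuming repository.TestRepository plus the removeLock and lockExists helpers from later examples, as well as the OK/Assert helpers from restic's test package: create a fake lock, check that it exists, remove it, and check that it is gone.

func TestFakeLockRoundtrip(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
	OK(t, err)

	Assert(t, lockExists(repo, t, id), "lock %v does not exist", id)

	OK(t, removeLock(repo, id))
	Assert(t, !lockExists(repo, t, id), "lock %v still exists after removal", id)
}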
Example #9
// SaveIndex saves an index in the repository.
func SaveIndex(repo restic.Repository, index *Index) (restic.ID, error) {
	buf := bytes.NewBuffer(nil)

	err := index.Finalize(buf)
	if err != nil {
		return restic.ID{}, err
	}

	return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}
Example #10
func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
	debug.Log("process index %v\n", id.Str())

	var idx indexJSON
	err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
	if err != nil {
		return nil, err
	}

	return &idx, nil
}
Example #11
func listPacks(t *testing.T, repo restic.Repository) restic.IDSet {
	done := make(chan struct{})
	defer close(done)

	list := restic.NewIDSet()
	for id := range repo.List(restic.DataFile, done) {
		list.Insert(id)
	}

	return list
}
Example #12
// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
	for i := 0; i < num; i++ {
		size := mrand.Int() % sizeMax

		buf := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, buf)
		OK(t, err)

		_, err = repo.SaveBlob(restic.DataBlob, buf, restic.ID{})
		OK(t, err)
	}
}
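A small sketch combining saveRandomDataBlobs with the countPacks helper from an earlier example: save a few blobs, flush, and check that at least one data pack was written. repository.TestRepository is again an assumption.

func TestSaveRandomDataBlobsSketch(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	saveRandomDataBlobs(t, repo, 5, 1<<20)
	OK(t, repo.Flush())

	if n := countPacks(repo, restic.DataFile); n == 0 {
		t.Errorf("expected at least one data pack, got %d", n)
	}
}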
Example #13
// RebuildIndex lists all packs in the repo, writes a new index and removes all
// old indexes. This operation should only be done with an exclusive lock in
// place.
func RebuildIndex(repo restic.Repository) error {
	debug.Log("start rebuilding index")

	done := make(chan struct{})
	defer close(done)

	ch := make(chan worker.Job)
	go list.AllPacks(repo, ch, done)

	idx := NewIndex()
	for job := range ch {
		id := job.Data.(restic.ID)

		if job.Error != nil {
			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)
			continue
		}

		res := job.Result.(list.Result)

		for _, entry := range res.Entries() {
			pb := restic.PackedBlob{
				Blob:   entry,
				PackID: res.PackID(),
			}
			idx.Store(pb)
		}
	}

	oldIndexes := restic.NewIDSet()
	for id := range repo.List(restic.IndexFile, done) {
		idx.AddToSupersedes(id)
		oldIndexes.Insert(id)
	}

	id, err := SaveIndex(repo, idx)
	if err != nil {
		debug.Log("error saving index: %v", err)
		return err
	}
	debug.Log("new index saved as %v", id.Str())

	for indexID := range oldIndexes {
		err := repo.Backend().Remove(restic.IndexFile, indexID.String())
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
		}
	}

	return nil
}
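A hedged usage sketch: rebuild the index of a freshly filled test repository and verify that exactly one index file remains. repository.TestRepository and the createRandomBlobs/saveIndex helpers from other examples are assumed to be reachable from the calling package.

func TestRebuildIndexSketch(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	createRandomBlobs(t, repo, 30, 0.7)
	saveIndex(t, repo)

	if err := RebuildIndex(repo); err != nil {
		t.Fatalf("RebuildIndex() returned error %v", err)
	}

	done := make(chan struct{})
	defer close(done)

	// RebuildIndex removes all superseded index files, so only the new one is left
	indexes := 0
	for range repo.List(restic.IndexFile, done) {
		indexes++
	}
	if indexes != 1 {
		t.Errorf("expected exactly one index file after rebuild, got %d", indexes)
	}
}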
Example #14
// replaceSpecialNodes replaces a node named "." or "/" with its contents.
// Any other node is returned unchanged.
func replaceSpecialNodes(repo restic.Repository, node *restic.Node) ([]*restic.Node, error) {
	if node.Type != "dir" || node.Subtree == nil {
		return []*restic.Node{node}, nil
	}

	if node.Name != "." && node.Name != "/" {
		return []*restic.Node{node}, nil
	}

	tree, err := repo.LoadTree(*node.Subtree)
	if err != nil {
		return nil, err
	}

	return tree.Nodes, nil
}
Example #15
func findPacksForBlobs(t *testing.T, repo restic.Repository, blobs restic.BlobSet) restic.IDSet {
	packs := restic.NewIDSet()

	idx := repo.Index()
	for h := range blobs {
		list, err := idx.Lookup(h.ID, h.Type)
		if err != nil {
			t.Fatal(err)
		}

		for _, pb := range list {
			packs.Insert(pb.PackID)
		}
	}

	return packs
}
Example #16
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("Loading index %v", id.Str())

	buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
	if err != nil {
		return nil, err
	}

	idx, err = fn(bytes.NewReader(buf))
	if err != nil {
		debug.Log("error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = id

	return idx, nil
}
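In restic this helper is typically wrapped with a concrete decode function; a hedged sketch, assuming a DecodeIndex function with the signature func(io.Reader) (*Index, error) exists alongside the Index type:

func LoadIndex(repo restic.Repository, id restic.ID) (*Index, error) {
	return LoadIndexWithDecoder(repo, id, DecodeIndex)
}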
Example #17
func checkSavedFile(t *testing.T, repo restic.Repository, treeID restic.ID, name string, rd io.Reader) {
	tree, err := repo.LoadTree(treeID)
	if err != nil {
		t.Fatalf("LoadTree() returned error %v", err)
	}

	if len(tree.Nodes) != 1 {
		t.Fatalf("wrong number of nodes for tree, want %v, got %v", 1, len(tree.Nodes))
	}

	node := tree.Nodes[0]
	if node.Name != "fakefile" {
		t.Fatalf("wrong filename, want %v, got %v", "fakefile", node.Name)
	}

	if len(node.Content) == 0 {
		t.Fatalf("node.Content has length 0")
	}

	// check blobs
	for i, id := range node.Content {
		size, err := repo.LookupBlobSize(id, restic.DataBlob)
		if err != nil {
			t.Fatal(err)
		}

		buf := make([]byte, int(size))
		n := loadBlob(t, repo, id, buf)
		if n != len(buf) {
			t.Errorf("wrong number of bytes read, want %d, got %d", len(buf), n)
		}

		buf2 := make([]byte, int(size))
		_, err = io.ReadFull(rd, buf2)
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(buf, buf2) {
			t.Fatalf("blob %d (%v) is wrong", i, id.Str())
		}
	}
}
Example #18
// loadSnapshotTreeIDs loads all snapshots from backend and returns the tree IDs.
func loadSnapshotTreeIDs(repo restic.Repository) (restic.IDs, []error) {
	var trees struct {
		IDs restic.IDs
		sync.Mutex
	}

	var errs struct {
		errs []error
		sync.Mutex
	}

	snapshotWorker := func(strID string, done <-chan struct{}) error {
		id, err := restic.ParseID(strID)
		if err != nil {
			return err
		}

		debug.Log("load snapshot %v", id.Str())

		treeID, err := loadTreeFromSnapshot(repo, id)
		if err != nil {
			errs.Lock()
			errs.errs = append(errs.errs, err)
			errs.Unlock()
			return nil
		}

		debug.Log("snapshot %v has tree %v", id.Str(), treeID.Str())
		trees.Lock()
		trees.IDs = append(trees.IDs, treeID)
		trees.Unlock()

		return nil
	}

	err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, defaultParallelism, snapshotWorker)
	if err != nil {
		errs.errs = append(errs.errs, err)
	}

	return trees.IDs, errs.errs
}
Example #19
func dumpIndexes(repo restic.Repository) error {
	done := make(chan struct{})
	defer close(done)

	for id := range repo.List(restic.IndexFile, done) {
		fmt.Printf("index_id: %v\n", id)

		idx, err := repository.LoadIndex(repo, id)
		if err != nil {
			return err
		}

		err = idx.Dump(os.Stdout)
		if err != nil {
			return err
		}
	}

	return nil
}
Example #20
func newDir(repo restic.Repository, node *restic.Node, ownerIsRoot bool) (*dir, error) {
	debug.Log("new dir for %v (%v)", node.Name, node.Subtree.Str())
	tree, err := repo.LoadTree(*node.Subtree)
	if err != nil {
		debug.Log("  error loading tree %v: %v", node.Subtree.Str(), err)
		return nil, err
	}
	items := make(map[string]*restic.Node)
	for _, node := range tree.Nodes {
		items[node.Name] = node
	}

	return &dir{
		repo:        repo,
		node:        node,
		items:       items,
		inode:       node.Inode,
		ownerIsRoot: ownerIsRoot,
	}, nil
}
Example #21
// loadTreeWorker loads trees from repo and sends them to out.
func loadTreeWorker(repo restic.Repository,
	in <-chan restic.ID, out chan<- treeJob,
	done <-chan struct{}, wg *sync.WaitGroup) {

	defer func() {
		debug.Log("exiting")
		wg.Done()
	}()

	var (
		inCh  = in
		outCh = out
		job   treeJob
	)

	outCh = nil
	for {
		select {
		case <-done:
			return

		case treeID, ok := <-inCh:
			if !ok {
				return
			}
			debug.Log("load tree %v", treeID.Str())

			tree, err := repo.LoadTree(treeID)
			debug.Log("load tree %v (%v) returned err: %v", tree, treeID.Str(), err)
			job = treeJob{ID: treeID, error: err, Tree: tree}
			outCh = out
			inCh = nil

		case outCh <- job:
			debug.Log("sent tree %v", job.ID.Str())
			outCh = nil
			inCh = in
		}
	}
}
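loadTreeWorker relies on the fact that a send or receive on a nil channel blocks forever, so a select case whose channel is nil is effectively disabled. A minimal, self-contained sketch of the same pattern (all names here are illustrative):

func forward(in <-chan int, out chan<- int, done <-chan struct{}) {
	var (
		inCh  = in
		outCh chan<- int // nil: the send case is disabled until a value is pending
		value int
	)

	for {
		select {
		case <-done:
			return

		case v, ok := <-inCh:
			if !ok {
				return
			}
			value = v
			inCh = nil  // stop receiving until the pending value has been sent
			outCh = out // enable the send case

		case outCh <- value:
			outCh = nil // disable the send case again
			inCh = in   // resume receiving
		}
	}
}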
Example #22
func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) {
	for i := 0; i < blobs; i++ {
		var (
			tpe    restic.BlobType
			length int
		)

		if rand.Float32() < pData {
			tpe = restic.DataBlob
			length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
		} else {
			tpe = restic.TreeBlob
			length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB
		}

		buf := random(t, length)
		id := restic.Hash(buf)

		if repo.Index().Has(id, restic.DataBlob) {
			t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob)
			continue
		}

		_, err := repo.SaveBlob(tpe, buf, id)
		if err != nil {
			t.Fatalf("SaveFrom() error %v", err)
		}

		if rand.Float32() < 0.2 {
			if err = repo.Flush(); err != nil {
				t.Fatalf("repo.Flush() returned error %v", err)
			}
		}
	}

	if err := repo.Flush(); err != nil {
		t.Fatalf("repo.Flush() returned error %v", err)
	}
}
Example #23
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r restic.Repository, id restic.ID) error {
	debug.Log("checking pack %v", id.Str())
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}
	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := restic.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	blobs, err := pack.List(r.Key(), bytes.NewReader(buf), int64(len(buf)))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range blobs {
		debug.Log("  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}
		plainBuf = plainBuf[:n]

		hash := restic.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
Example #24
func removeLock(repo restic.Repository, id restic.ID) error {
	return repo.Backend().Remove(restic.LockFile, id.String())
}
Example #25
// Load creates an index by loading all index files from the repo.
func Load(repo restic.Repository, p *restic.Progress) (*Index, error) {
	debug.Log("loading indexes")

	p.Start()
	defer p.Done()

	done := make(chan struct{})
	defer close(done)

	supersedes := make(map[restic.ID]restic.IDSet)
	results := make(map[restic.ID]map[restic.ID]Pack)

	index := newIndex()

	for id := range repo.List(restic.IndexFile, done) {
		p.Report(restic.Stat{Blobs: 1})

		debug.Log("Load index %v", id.Str())
		idx, err := loadIndexJSON(repo, id)
		if err != nil {
			return nil, err
		}

		res := make(map[restic.ID]Pack)
		supersedes[id] = restic.NewIDSet()
		for _, sid := range idx.Supersedes {
			debug.Log("  index %v supersedes %v", id.Str(), sid)
			supersedes[id].Insert(sid)
		}

		for _, jpack := range idx.Packs {
			entries := make([]restic.Blob, 0, len(jpack.Blobs))
			for _, blob := range jpack.Blobs {
				entry := restic.Blob{
					ID:     blob.ID,
					Type:   blob.Type,
					Offset: blob.Offset,
					Length: blob.Length,
				}
				entries = append(entries, entry)
			}

			if err = index.AddPack(jpack.ID, 0, entries); err != nil {
				return nil, err
			}
		}

		results[id] = res
		index.IndexIDs.Insert(id)
	}

	for superID, list := range supersedes {
		for indexID := range list {
			if _, ok := results[indexID]; !ok {
				continue
			}
			debug.Log("  removing index %v, superseded by %v", indexID.Str(), superID.Str())
			fmt.Fprintf(os.Stderr, "index %v can be removed, superseded by index %v\n", indexID.Str(), superID.Str())
			delete(results, indexID)
		}
	}

	return index, nil
}
Example #26
// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
	debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

	buf := make([]byte, 0, maxPackSize)
	for packID := range packs {
		// load the complete pack
		h := restic.Handle{Type: restic.DataFile, Name: packID.String()}

		l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
		if errors.Cause(err) == io.ErrUnexpectedEOF {
			err = nil
			buf = buf[:l]
		}

		if err != nil {
			return err
		}

		debug.Log("pack %v loaded (%d bytes)", packID.Str(), len(buf))

		blobs, err := pack.List(repo.Key(), bytes.NewReader(buf), int64(len(buf)))
		if err != nil {
			return err
		}

		debug.Log("processing pack %v, blobs: %v", packID.Str(), len(blobs))
		var plaintext []byte
		for _, entry := range blobs {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			if !keepBlobs.Has(h) {
				continue
			}

			debug.Log("  process blob %v", h)

			ciphertext := buf[entry.Offset : entry.Offset+entry.Length]
			plaintext = plaintext[:cap(plaintext)]
			if len(plaintext) < len(ciphertext) {
				plaintext = make([]byte, len(ciphertext))
			}

			debug.Log("  ciphertext %d, plaintext %d", len(ciphertext), len(plaintext))

			n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext)
			if err != nil {
				return err
			}
			plaintext = plaintext[:n]

			_, err = repo.SaveBlob(entry.Type, plaintext, entry.ID)
			if err != nil {
				return err
			}

			debug.Log("  saved blob %v", entry.ID.Str())

			keepBlobs.Delete(h)
		}
	}

	if err := repo.Flush(); err != nil {
		return err
	}

	for packID := range packs {
		err := repo.Backend().Remove(restic.DataFile, packID.String())
		if err != nil {
			debug.Log("error removing pack %v: %v", packID.Str(), err)
			return err
		}
		debug.Log("removed pack %v", packID.Str())
	}

	return nil
}
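A hedged sketch of the flow the test helpers above are built for: pick the blobs to keep, find the packs holding the blobs to drop, repack those packs, and reload the index. All helpers referenced here appear in other examples; that they live in the same package is an assumption.

func repackAndReload(t *testing.T, repo restic.Repository) {
	keepBlobs, removeBlobs := selectBlobs(t, repo, 0.2)
	removePacks := findPacksForBlobs(t, repo, removeBlobs)

	if err := Repack(repo, removePacks, keepBlobs); err != nil {
		t.Fatalf("Repack() returned error %v", err)
	}

	reloadIndex(t, repo)
}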
Example #27
func reloadIndex(t *testing.T, repo restic.Repository) {
	repo.SetIndex(repository.NewMasterIndex())
	if err := repo.LoadIndex(); err != nil {
		t.Fatalf("error loading new index: %v", err)
	}
}
Example #28
func saveIndex(t *testing.T, repo restic.Repository) {
	if err := repo.SaveIndex(); err != nil {
		t.Fatalf("repo.SaveIndex() %v", err)
	}
}
Example #29
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string, tags []string) (*restic.Snapshot, restic.ID, error) {
	debug.Log("start archiving %s", name)
	sn, err := restic.NewSnapshot([]string{name}, tags)
	if err != nil {
		return nil, restic.ID{}, err
	}

	p.Start()
	defer p.Done()

	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial)

	var ids restic.IDs
	var fileSize uint64

	for {
		chunk, err := chnker.Next(getBuf())
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()")
		}

		id := restic.Hash(chunk.Data)

		if !repo.Index().Has(id, restic.DataBlob) {
			_, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id)
			if err != nil {
				return nil, restic.ID{}, err
			}
			debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
		} else {
			debug.Log("blob %v already saved in the repo\n", id.Str())
		}

		freeBuf(chunk.Data)

		ids = append(ids, id)

		p.Report(restic.Stat{Bytes: uint64(chunk.Length)})
		fileSize += uint64(chunk.Length)
	}

	tree := &restic.Tree{
		Nodes: []*restic.Node{
			{
				Name:       name,
				AccessTime: time.Now(),
				ModTime:    time.Now(),
				Type:       "file",
				Mode:       0644,
				Size:       fileSize,
				UID:        sn.UID,
				GID:        sn.GID,
				User:       sn.Username,
				Content:    ids,
			},
		},
	}

	treeID, err := repo.SaveTree(tree)
	if err != nil {
		return nil, restic.ID{}, err
	}
	sn.Tree = &treeID
	debug.Log("tree saved as %v", treeID.Str())

	id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn)
	if err != nil {
		return nil, restic.ID{}, err
	}

	debug.Log("snapshot saved as %v", id.Str())

	err = repo.Flush()
	if err != nil {
		return nil, restic.ID{}, err
	}

	err = repo.SaveIndex()
	if err != nil {
		return nil, restic.ID{}, err
	}

	return sn, id, nil
}
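A minimal hedged sketch of calling ArchiveReader with an in-memory reader; repository.TestRepository is assumed, and passing a nil *restic.Progress relies on the Progress methods being nil-safe.

func TestArchiveReaderSketch(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	rd := bytes.NewReader([]byte("content to archive"))

	sn, id, err := ArchiveReader(repo, nil, rd, "fakefile", nil)
	if err != nil {
		t.Fatalf("ArchiveReader() returned error %v", err)
	}

	t.Logf("snapshot %v saved, tree %v", id.Str(), sn.Tree.Str())
}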
Example #30
func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool {
	exists, err := repo.Backend().Test(restic.LockFile, id.String())
	OK(t, err)

	return exists
}