Example #1
func TestLockRefresh(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock, err := restic.NewLock(repo)
	OK(t, err)

	var lockID *restic.ID
	for id := range repo.List(restic.LockFile, nil) {
		if lockID != nil {
			t.Error("more than one lock found")
		}
		lockID = &id
	}

	OK(t, lock.Refresh())

	var lockID2 *restic.ID
	for id := range repo.List(restic.LockFile, nil) {
		if lockID2 != nil {
			t.Error("more than one lock found")
		}
		lockID2 = &id
	}

	Assert(t, !lockID.Equal(*lockID2),
		"expected a new ID after lock refresh, got the same")
	OK(t, lock.Unlock())
}
Example #2
func TestLoadJSONUnpacked(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// create a snapshot record and save it
	sn := restic.Snapshot{}
	sn.Hostname = "foobar"
	sn.Username = "******"

	id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, &sn)
	OK(t, err)

	var sn2 restic.Snapshot

	// restore
	err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, &sn2)
	OK(t, err)

	Equals(t, sn.Hostname, sn2.Hostname)
	Equals(t, sn.Username, sn2.Username)
}
Example #3
func TestSaveFrom(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		OK(t, err)

		id := restic.Hash(data)

		// save
		id2, err := repo.SaveBlob(restic.DataBlob, data, id)
		OK(t, err)
		Equals(t, id, id2)

		OK(t, repo.Flush())

		// read back
		buf := make([]byte, size)
		n, err := repo.LoadBlob(restic.DataBlob, id, buf)
		OK(t, err)
		Equals(t, len(buf), n)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}
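testSizes is not defined in this listing; any handful of blob sizes exercises the same save/load path. A hypothetical definition for running the test standalone:

// testSizes is an assumed definition, not taken from the original file;
// it only needs to cover a spread of blob sizes.
var testSizes = []int{5, 23, 1 << 16, 1 << 20}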
Example #4
func TestCreateSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for i := 0; i < testCreateSnapshots; i++ {
		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second), testDepth, 0)
	}

	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		t.Fatal(err)
	}

	if len(snapshots) != testCreateSnapshots {
		t.Fatalf("got %d snapshots, expected %d", len(snapshots), 1)
	}

	sn := snapshots[0]
	if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) {
		t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time)
	}

	if sn.Tree == nil {
		t.Fatalf("tree id is nil")
	}

	if sn.Tree.IsNull() {
		t.Fatalf("snapshot has zero tree ID")
	}

	checker.TestCheckRepo(t, repo)
}
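The checker.TestCheckRepo call bundles the index-loading, structure, and data-read verification that Example #9 below performs by hand against the checker API.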
Example #5
func archiveWithDedup(t testing.TB) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping TestArchiverDedup")
	}

	var cnt struct {
		before, after, after2 struct {
			packs, dataBlobs, treeBlobs uint
		}
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn.ID().Str())

	// get archive stats
	cnt.before.packs = countPacks(repo, restic.DataFile)
	cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
	cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)

	// archive the same files again, without parent snapshot
	sn2 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	t.Logf("archived snapshot %v", sn2.ID().Str())

	// get archive stats again
	cnt.after.packs = countPacks(repo, restic.DataFile)
	cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
	cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after.dataBlobs)
	}

	// archive the same files again, with a parent snapshot
	sn3 := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, sn2.ID())
	t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())

	// get archive stats again
	cnt.after2.packs = countPacks(repo, restic.DataFile)
	cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
	cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
	t.Logf("packs %v, data blobs %v, tree blobs %v",
		cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)

	// if there are more data blobs, something is wrong
	if cnt.after2.dataBlobs > cnt.before.dataBlobs {
		t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
			cnt.before.dataBlobs, cnt.after2.dataBlobs)
	}
}
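countPacks is not shown in this listing. A minimal sketch, assuming it simply drains the repository's List channel for the given file type (the same channel Examples #1 and #15 iterate over):

// countPacks: hypothetical reconstruction of the helper used above; it
// counts the repository files of the given type.
func countPacks(repo restic.Repository, t restic.FileType) (n uint) {
	for range repo.List(t, nil) {
		n++
	}
	return n
}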
Example #6
func TestLockExclusive(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	elock, err := restic.NewExclusiveLock(repo)
	OK(t, err)
	OK(t, elock.Unlock())
}
Example #7
func createFilledRepo(t testing.TB, snapshots int, dup float32) (restic.Repository, func()) {
	repo, cleanup := repository.TestRepository(t)

	for i := 0; i < 3; i++ {
		restic.TestCreateSnapshot(t, repo, snapshotTime.Add(time.Duration(i)*time.Second), depth, dup)
	}

	return repo, cleanup
}
Example #8
func TestLock(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock, err := restic.NewLock(repo)
	OK(t, err)

	OK(t, lock.Unlock())
}
Example #9
func TestCreateSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for i := 0; i < testCreateSnapshots; i++ {
		restic.TestCreateSnapshot(t, repo, testSnapshotTime.Add(time.Duration(i)*time.Second))
	}

	snapshots, err := restic.LoadAllSnapshots(repo)
	if err != nil {
		t.Fatal(err)
	}

	if len(snapshots) != testCreateSnapshots {
		t.Fatalf("got %d snapshots, expected %d", len(snapshots), 1)
	}

	sn := snapshots[0]
	if sn.Time.Before(testSnapshotTime) || sn.Time.After(testSnapshotTime.Add(testCreateSnapshots*time.Second)) {
		t.Fatalf("timestamp %v is outside of the allowed time range", sn.Time)
	}

	if sn.Tree == nil {
		t.Fatalf("tree id is nil")
	}

	if sn.Tree.IsNull() {
		t.Fatalf("snapshot has zero tree ID")
	}

	chkr := checker.New(repo)

	hints, errs := chkr.LoadIndex()
	if len(errs) != 0 {
		t.Fatalf("errors loading index: %v", errs)
	}

	if len(hints) != 0 {
		t.Fatalf("errors loading index: %v", hints)
	}

	done := make(chan struct{})
	defer close(done)
	errChan := make(chan error)
	go chkr.Structure(errChan, done)

	for err := range errChan {
		t.Error(err)
	}

	errChan = make(chan error)
	go chkr.ReadData(nil, errChan, done)

	for err := range errChan {
		t.Error(err)
	}
}
Example #10
func archiveDirectory(b testing.TB) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	arch := archiver.New(repo)

	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil, nil)
	OK(b, err)

	b.Logf("snapshot archived as %v", id)
}
Example #11
func TestMultipleLock(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock1, err := restic.NewLock(repo)
	OK(t, err)

	lock2, err := restic.NewLock(repo)
	OK(t, err)

	OK(t, lock1.Unlock())
	OK(t, lock2.Unlock())
}
Example #12
func TestDoubleUnlock(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock, err := restic.NewLock(repo)
	OK(t, err)

	OK(t, lock.Unlock())

	err = lock.Unlock()
	Assert(t, err != nil,
		"double unlock didn't return an error, got %v", err)
}
Example #13
func TestLoadTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	_, err := repo.LoadTree(*sn.Tree)
	OK(t, err)
}
Example #14
func TestExclusiveLockOnLockedRepo(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	elock, err := restic.NewLock(repo)
	OK(t, err)

	lock, err := restic.NewExclusiveLock(repo)
	Assert(t, err != nil,
		"create exclusive lock on an already locked repo didn't return an error")
	Assert(t, restic.IsAlreadyLocked(err),
		"create exclusive lock on an already locked repo didn't return the correct error")

	OK(t, lock.Unlock())
	OK(t, elock.Unlock())
}
Example #15
func TestRepositoryIncrementalIndex(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	repository.IndexFull = func(*repository.Index) bool { return true }

	// add 15 packs
	for j := 0; j < 5; j++ {
		// add 3 packs, write intermediate index
		for i := 0; i < 3; i++ {
			saveRandomDataBlobs(t, repo, 5, 1<<15)
			OK(t, repo.Flush())
		}

		OK(t, repo.SaveFullIndex())
	}

	// add another 5 packs
	for i := 0; i < 5; i++ {
		saveRandomDataBlobs(t, repo, 5, 1<<15)
		OK(t, repo.Flush())
	}

	// save final index
	OK(t, repo.SaveIndex())

	packEntries := make(map[restic.ID]map[restic.ID]struct{})

	for id := range repo.List(restic.IndexFile, nil) {
		idx, err := repository.LoadIndex(repo, id)
		OK(t, err)

		for pb := range idx.Each(nil) {
			if _, ok := packEntries[pb.PackID]; !ok {
				packEntries[pb.PackID] = make(map[restic.ID]struct{})
			}

			packEntries[pb.PackID][id] = struct{}{}
		}
	}

	for packID, ids := range packEntries {
		if len(ids) > 1 {
			t.Errorf("pack %v listed in %d indexes\n", packID, len(ids))
		}
	}
}
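saveRandomDataBlobs is left undefined here. A sketch under the assumption that it stores num random blobs of up to sizeMax bytes each, reusing the SaveBlob path from Example #3 (math/rand imported as mrand, crypto/rand as rand):

// saveRandomDataBlobs: hypothetical reconstruction of the helper used above.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
	for i := 0; i < num; i++ {
		size := mrand.Intn(sizeMax) + 1
		buf := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, buf)
		OK(t, err)

		_, err = repo.SaveBlob(restic.DataBlob, buf, restic.Hash(buf))
		OK(t, err)
	}
}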
Example #16
func BenchmarkChunkEncrypt(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	data := Random(23, 10<<20) // 10MiB
	rd := bytes.NewReader(data)

	buf := make([]byte, chunker.MaxSize)
	buf2 := make([]byte, chunker.MaxSize)

	b.ResetTimer()
	b.SetBytes(int64(len(data)))

	for i := 0; i < b.N; i++ {
		benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key())
	}
}
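Random is another helper from the test package, not shown here. A plausible sketch, assuming it returns length bytes of pseudo-random data deterministically derived from seed (math/rand imported as mrand):

// Random: hypothetical reconstruction; *rand.Rand.Read is documented to
// fill the buffer and never return an error.
func Random(seed, length int) []byte {
	buf := make([]byte, length)
	mrand.New(mrand.NewSource(int64(seed))).Read(buf)
	return buf
}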
Example #17
func BenchmarkChunkEncryptParallel(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	data := Random(23, 10<<20) // 10MiB

	buf := make([]byte, chunker.MaxSize)

	b.ResetTimer()
	b.SetBytes(int64(len(data)))

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			rd := bytes.NewReader(data)
			benchmarkChunkEncryptP(pb, buf, rd, repo.Key())
		}
	})
}
Example #18
func BenchmarkFindUsedBlobs(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	sn := restic.TestCreateSnapshot(b, repo, findTestTime, findTestDepth, 0)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		seen := restic.NewBlobSet()
		blobs := restic.NewBlobSet()
		err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
		if err != nil {
			b.Error(err)
		}

		b.Logf("found %v blobs", len(blobs))
	}
}
Example #19
func BenchmarkLoadTree(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		_, err := repo.LoadTree(*sn.Tree)
		OK(t, err)
	}
}
Example #20
func testParallelSaveWithDuplication(t *testing.T, seed int) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	dataSizeMB := 128
	duplication := 7

	arch := archiver.New(repo)
	chunks := getRandomData(seed, dataSizeMB*1024*1024)

	errChannels := [](<-chan error){}

	// interleaved processing of subsequent chunks
	maxParallel := 2*duplication - 1
	barrier := make(chan struct{}, maxParallel)

	for _, c := range chunks {
		for dupIdx := 0; dupIdx < duplication; dupIdx++ {
			errChan := make(chan error)
			errChannels = append(errChannels, errChan)

			go func(c chunker.Chunk, errChan chan<- error) {
				barrier <- struct{}{}

				id := restic.Hash(c.Data)
				time.Sleep(time.Duration(id[0]))
				err := arch.Save(restic.DataBlob, c.Data, id)
				<-barrier
				errChan <- err
			}(c, errChan)
		}
	}

	for _, errChan := range errChannels {
		OK(t, <-errChan)
	}

	OK(t, repo.Flush())
	OK(t, repo.SaveIndex())

	chkr := createAndInitChecker(t, repo)
	assertNoUnreferencedPacks(t, chkr)
}
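getRandomData is not included above. A sketch assuming it splits seeded random data with the restic chunker, where testPol stands in for whatever fixed chunking polynomial the suite uses and Random is the helper sketched after Example #16:

// getRandomData: hypothetical reconstruction of the helper used above.
func getRandomData(seed int, size int) []chunker.Chunk {
	buf := Random(seed, size)

	var chunks []chunker.Chunk
	c := chunker.New(bytes.NewReader(buf), testPol)
	for {
		chunk, err := c.Next(nil)
		if err == io.EOF {
			break
		}
		// any other error is unexpected when reading from memory
		if err != nil {
			panic(err)
		}
		chunks = append(chunks, chunk)
	}
	return chunks
}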
Example #21
func TestLoadTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// save tree
	tree := restic.NewTree()
	id, err := repo.SaveTree(tree)
	OK(t, err)

	// save packs
	OK(t, repo.Flush())

	// load tree again
	tree2, err := repo.LoadTree(id)
	OK(t, err)

	Assert(t, tree.Equals(tree2),
		"trees are not equal: want %v, got %v",
		tree, tree2)
}
Example #22
func BenchmarkSaveAndEncrypt(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	size := 4 << 20 // 4MiB

	data := make([]byte, size)
	_, err := io.ReadFull(rand.Reader, data)
	OK(t, err)

	id := restic.ID(sha256.Sum256(data))

	t.ResetTimer()
	t.SetBytes(int64(size))

	for i := 0; i < t.N; i++ {
		// save
		_, err = repo.SaveBlob(restic.DataBlob, data, id)
		OK(t, err)
	}
}
Example #23
func TestIndexLoadDocReference(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id, err := repo.SaveUnpacked(restic.IndexFile, docExample)
	if err != nil {
		t.Fatalf("SaveUnpacked() returned error %v", err)
	}

	t.Logf("index saved as %v", id.Str())

	idx := loadIndex(t, repo)

	blobID := restic.TestParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66")
	locs, err := idx.FindBlob(restic.BlobHandle{ID: blobID, Type: restic.DataBlob})
	if err != nil {
		t.Errorf("FindBlob() returned error %v", err)
	}

	if len(locs) != 1 {
		t.Errorf("blob found %d times, expected just one", len(locs))
	}

	l := locs[0]
	if !l.ID.Equal(blobID) {
		t.Errorf("blob IDs are not equal: %v != %v", l.ID, blobID)
	}

	if l.Type != restic.DataBlob {
		t.Errorf("want type %v, got %v", restic.DataBlob, l.Type)
	}

	if l.Offset != 150 {
		t.Errorf("wrong offset, want %d, got %v", 150, l.Offset)
	}

	if l.Length != 123 {
		t.Errorf("wrong length, want %d, got %v", 123, l.Length)
	}
}
Example #24
func BenchmarkArchiveReader(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	const size = 50 * 1024 * 1024

	buf := make([]byte, size)
	_, err := io.ReadFull(fakeFile(t, 23, size), buf)
	if err != nil {
		t.Fatal(err)
	}

	t.SetBytes(size)
	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		_, _, err := ArchiveReader(repo, nil, bytes.NewReader(buf), "fakefile", []string{"test"})
		if err != nil {
			t.Fatal(err)
		}
	}
}
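fakeFile is not defined in these listings. A minimal sketch, assuming it yields size bytes of deterministic pseudo-random content per seed (math/rand imported as mrand; *rand.Rand implements io.Reader):

// fakeFile: hypothetical reconstruction of the helper used above.
func fakeFile(t testing.TB, seed, size int64) io.Reader {
	return io.LimitReader(mrand.New(mrand.NewSource(seed)), size)
}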
Example #25
func TestRemoveAllLocks(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
	OK(t, err)

	id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
	OK(t, err)

	id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000)
	OK(t, err)

	OK(t, restic.RemoveAllLocks(repo))

	Assert(t, lockExists(repo, t, id1) == false,
		"lock still exists after RemoveAllLocks was called")
	Assert(t, lockExists(repo, t, id2) == false,
		"lock still exists after RemoveAllLocks was called")
	Assert(t, lockExists(repo, t, id3) == false,
		"lock still exists after RemoveAllLocks was called")
}
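createFakeLock and lockExists come from the surrounding test file. Sketches, assuming restic.Lock exports its Time and PID fields and that lock files can be enumerated with List as in Example #1:

// createFakeLock: hypothetical reconstruction; writes a lock file with the
// given timestamp and PID via the SaveJSONUnpacked path from Example #2.
func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) {
	newLock := &restic.Lock{Time: t, PID: pid}
	return repo.SaveJSONUnpacked(restic.LockFile, newLock)
}

// lockExists: hypothetical reconstruction; scans the repository's lock
// files for the given ID.
func lockExists(repo restic.Repository, t testing.TB, lockID restic.ID) bool {
	for id := range repo.List(restic.LockFile, nil) {
		if id.Equal(lockID) {
			return true
		}
	}
	return false
}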
Example #26
func TestArchiveReader(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	seed := rand.Int63()
	size := int64(rand.Intn(50*1024*1024) + 50*1024*1024)
	t.Logf("seed is 0x%016x, size is %v", seed, size)

	f := fakeFile(t, seed, size)

	sn, id, err := ArchiveReader(repo, nil, f, "fakefile", []string{"test"})
	if err != nil {
		t.Fatalf("ArchiveReader() returned error %v", err)
	}

	if id.IsNull() {
		t.Fatalf("ArchiveReader() returned null ID")
	}

	t.Logf("snapshot saved as %v, tree is %v", id.Str(), sn.Tree.Str())

	checkSavedFile(t, repo, *sn.Tree, "fakefile", fakeFile(t, seed, size))
}
Example #27
func TestFindUsedBlobs(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	var snapshots []*restic.Snapshot
	for i := 0; i < findTestSnapshots; i++ {
		sn := restic.TestCreateSnapshot(t, repo, findTestTime.Add(time.Duration(i)*time.Second), findTestDepth, 0)
		t.Logf("snapshot %v saved, tree %v", sn.ID().Str(), sn.Tree.Str())
		snapshots = append(snapshots, sn)
	}

	for i, sn := range snapshots {
		usedBlobs := restic.NewBlobSet()
		err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, restic.NewBlobSet())
		if err != nil {
			t.Errorf("FindUsedBlobs returned error: %v", err)
			continue
		}

		if len(usedBlobs) == 0 {
			t.Errorf("FindUsedBlobs returned an empty set")
			continue
		}

		goldenFilename := filepath.Join("testdata", fmt.Sprintf("used_blobs_snapshot%d", i))
		want := loadIDSet(t, goldenFilename)

		if !want.Equals(usedBlobs) {
			t.Errorf("snapshot %d: wrong list of blobs returned:\n  missing blobs: %v\n  extra blobs: %v",
				i, want.Sub(usedBlobs), usedBlobs.Sub(want))
		}

		if *updateGoldenFiles {
			saveIDSet(t, goldenFilename, usedBlobs)
		}
	}
}
Example #28
func TestLockWithStaleLock(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	id1, err := createFakeLock(repo, time.Now().Add(-time.Hour), os.Getpid())
	OK(t, err)

	id2, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid())
	OK(t, err)

	id3, err := createFakeLock(repo, time.Now().Add(-time.Minute), os.Getpid()+500000)
	OK(t, err)

	OK(t, restic.RemoveStaleLocks(repo))

	Assert(t, lockExists(repo, t, id1) == false,
		"stale lock still exists after RemoveStaleLocks was called")
	Assert(t, lockExists(repo, t, id2) == true,
		"non-stale lock was removed by RemoveStaleLocks")
	Assert(t, lockExists(repo, t, id3) == false,
		"stale lock still exists after RemoveStaleLocks was called")

	OK(t, removeLock(repo, id2))
}
Example #29
func TestWalkTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	dirs, err := filepath.Glob(TestWalkerPath)
	OK(t, err)

	// archive a few files
	arch := archiver.New(repo)
	sn, _, err := arch.Snapshot(nil, dirs, nil, nil)
	OK(t, err)

	// flush repo, write all packs
	OK(t, repo.Flush())

	done := make(chan struct{})

	// start tree walker
	treeJobs := make(chan walk.TreeJob)
	go walk.Tree(repo, *sn.Tree, done, treeJobs)

	// start filesystem walker
	fsJobs := make(chan pipe.Job)
	resCh := make(chan pipe.Result, 1)

	f := func(string, os.FileInfo) bool {
		return true
	}
	go pipe.Walk(dirs, f, done, fsJobs, resCh)

	for {
		// receive fs job
		fsJob, fsChOpen := <-fsJobs
		Assert(t, !fsChOpen || fsJob != nil,
			"received nil job from filesystem: %v %v", fsJob, fsChOpen)
		if fsJob != nil {
			OK(t, fsJob.Error())
		}

		var path string
		fsEntries := 1
		switch j := fsJob.(type) {
		case pipe.Dir:
			path = j.Path()
			fsEntries = len(j.Entries)
		case pipe.Entry:
			path = j.Path()
		}

		// receive tree job
		treeJob, treeChOpen := <-treeJobs
		treeEntries := 1

		OK(t, treeJob.Error)

		if treeJob.Tree != nil {
			treeEntries = len(treeJob.Tree.Nodes)
		}

		Assert(t, fsChOpen == treeChOpen,
			"one channel closed too early: fsChOpen %v, treeChOpen %v",
			fsChOpen, treeChOpen)

		if !fsChOpen || !treeChOpen {
			break
		}

		Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path),
			"paths do not match: %q != %q", filepath.Base(path), filepath.Base(treeJob.Path))

		Assert(t, fsEntries == treeEntries,
			"wrong number of entries: %v != %v", fsEntries, treeEntries)
	}
}
Example #30
func TestRepack(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	createRandomBlobs(t, repo, 100, 0.7)

	packsBefore := listPacks(t, repo)

	// Running repack on empty ID sets should not do anything at all.
	repack(t, repo, nil, nil)

	packsAfter := listPacks(t, repo)

	if !packsAfter.Equals(packsBefore) {
		t.Fatalf("packs are not equal, Repack modified something. Before:\n  %v\nAfter:\n  %v",
			packsBefore, packsAfter)
	}

	saveIndex(t, repo)

	removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2)

	removePacks := findPacksForBlobs(t, repo, removeBlobs)

	repack(t, repo, removePacks, keepBlobs)
	rebuildIndex(t, repo)
	reloadIndex(t, repo)

	packsAfter = listPacks(t, repo)
	for id := range removePacks {
		if packsAfter.Has(id) {
			t.Errorf("pack %v still present although it should have been repacked and removed", id.Str())
		}
	}

	idx := repo.Index()

	for h := range keepBlobs {
		list, err := idx.Lookup(h.ID, h.Type)
		if err != nil {
			t.Errorf("unable to find blob %v in repo", h.ID.Str())
			continue
		}

		if len(list) != 1 {
			t.Errorf("expected one pack in the list, got: %v", list)
			continue
		}

		pb := list[0]

		if removePacks.Has(pb.PackID) {
			t.Errorf("lookup returned pack ID %v that should've been removed", pb.PackID)
		}
	}

	for h := range removeBlobs {
		if _, err := idx.Lookup(h.ID, h.Type); err == nil {
			t.Errorf("blob %v still contained in the repo", h)
		}
	}
}
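Of the helpers Example #30 relies on, listPacks is the most mechanical. A sketch, assuming restic.NewIDSet provides the Insert/Has/Equals set operations used above:

// listPacks: hypothetical reconstruction; collects the IDs of all pack
// files into an ID set.
func listPacks(t testing.TB, repo restic.Repository) restic.IDSet {
	set := restic.NewIDSet()
	for id := range repo.List(restic.DataFile, nil) {
		set.Insert(id)
	}
	return set
}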