Example no. 1
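// archiveDirectory archives the contents of BenchArchiveDirectory into a
// freshly set-up test repository and logs the resulting snapshot ID.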
func archiveDirectory(b testing.TB) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	arch := restic.NewArchiver(repo)

	_, id, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
	OK(b, err)

	b.Logf("snapshot archived as %v", id)
}
Example no. 2
func TestCache(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	_, err := restic.NewCache(repo, "")
	OK(t, err)

	arch := restic.NewArchiver(repo)

	// archive some files, this should automatically cache all blobs from the snapshot
	_, _, err = arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
	OK(t, err)

	// TODO: test caching index
}
Example no. 3
func testParallelSaveWithDuplication(t *testing.T, seed int) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	dataSizeMb := 128
	duplication := 7

	arch := restic.NewArchiver(repo)
	data, chunks := getRandomData(seed, dataSizeMb*1024*1024)
	reader := bytes.NewReader(data)

	errChannels := [](<-chan error){}

	// interleaved processing of subsequent chunks
	maxParallel := 2*duplication - 1
	barrier := make(chan struct{}, maxParallel)

	for _, c := range chunks {
		for dupIdx := 0; dupIdx < duplication; dupIdx++ {
			errChan := make(chan error)
			errChannels = append(errChannels, errChan)

			go func(reader *bytes.Reader, c *chunker.Chunk, errChan chan<- error) {
				barrier <- struct{}{}

				hash := c.Digest
				id := backend.ID{}
				copy(id[:], hash)

				time.Sleep(time.Duration(hash[0]))
				err := arch.Save(pack.Data, id, c.Length, c.Reader(reader))
				<-barrier
				errChan <- err
			}(reader, c, errChan)
		}
	}

	for _, errChan := range errChannels {
		OK(t, <-errChan)
	}

	OK(t, repo.Flush())
	OK(t, repo.SaveIndex())

	chkr := createAndInitChecker(t, repo)
	assertNoUnreferencedPacks(t, chkr)
}
Example no. 4
func TestCheckerModifiedData(t *testing.T) {
	be := mem.New()

	repo := repository.New(be)
	OK(t, repo.Init(TestPassword))

	arch := restic.NewArchiver(repo)
	_, id, err := arch.Snapshot(nil, []string{"."}, nil)
	OK(t, err)
	t.Logf("archived as %v", id.Str())

	beError := &errorBackend{Backend: be}
	checkRepo := repository.New(beError)
	OK(t, checkRepo.SearchKey(TestPassword))

	chkr := checker.New(checkRepo)

	hints, errs := chkr.LoadIndex()
	if len(errs) > 0 {
		t.Fatalf("expected no errors, got %v: %v", len(errs), errs)
	}

	if len(hints) > 0 {
		t.Errorf("expected no hints, got %v: %v", len(hints), hints)
	}

	beError.ProduceErrors = true
	errFound := false
	for _, err := range checkPacks(chkr) {
		t.Logf("pack error: %v", err)
	}

	for _, err := range checkStruct(chkr) {
		t.Logf("struct error: %v", err)
	}

	for _, err := range checkData(chkr) {
		t.Logf("data error: %v", err)
		errFound = true
	}

	if !errFound {
		t.Fatal("no error found, checker is broken")
	}
}
Example no. 5
func BenchmarkLoadTree(t *testing.B) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping TestArchiverDedup")
	}

	// archive a few files
	arch := restic.NewArchiver(repo)
	sn, _, err := arch.Snapshot(nil, []string{BenchArchiveDirectory}, nil)
	OK(t, err)
	t.Logf("archived snapshot %v", sn.ID())

	list := make([]backend.ID, 0, 10)
	done := make(chan struct{})

	for _, idx := range repo.Index().All() {
		for blob := range idx.Each(done) {
			if blob.Type != pack.Tree {
				continue
			}

			list = append(list, blob.ID)
			if len(list) == cap(list) {
				close(done)
				break
			}
		}
	}

	// start benchmark
	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		for _, id := range list {
			_, err := restic.LoadTree(repo, id)
			OK(t, err)
		}
	}
}
Example no. 6
func testArchiverDuplication(t *testing.T) {
	_, err := io.ReadFull(rand.Reader, DupID[:])
	if err != nil {
		t.Fatal(err)
	}

	repo := repository.New(forgetfulBackend())

	err = repo.Init("foo")
	if err != nil {
		t.Fatal(err)
	}

	arch := restic.NewArchiver(repo)

	wg := &sync.WaitGroup{}
	done := make(chan struct{})
	for i := 0; i < parallelSaves; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case <-done:
					return
				default:
				}

				id := randomID()

				if repo.Index().Has(id) {
					continue
				}

				buf := make([]byte, 50)

				err := arch.Save(pack.Data, buf, id)
				if err != nil {
					t.Fatal(err)
				}
			}
		}()
	}

	saveIndex := func() {
		defer wg.Done()

		ticker := time.NewTicker(testSaveIndexTime)
		defer ticker.Stop()

		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				err := repo.SaveFullIndex()
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}

	wg.Add(1)
	go saveIndex()

	<-time.After(testTimeout)
	close(done)

	wg.Wait()
}
Example no. 7
func TestWalkTree(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	dirs, err := filepath.Glob(TestWalkerPath)
	OK(t, err)

	// archive a few files
	arch := restic.NewArchiver(repo)
	sn, _, err := arch.Snapshot(nil, dirs, nil)
	OK(t, err)

	// flush repo, write all packs
	OK(t, repo.Flush())

	done := make(chan struct{})

	// start tree walker
	treeJobs := make(chan restic.WalkTreeJob)
	go restic.WalkTree(repo, *sn.Tree, done, treeJobs)

	// start filesystem walker
	fsJobs := make(chan pipe.Job)
	resCh := make(chan pipe.Result, 1)

	f := func(string, os.FileInfo) bool {
		return true
	}
	go pipe.Walk(dirs, f, done, fsJobs, resCh)

	for {
		// receive fs job
		fsJob, fsChOpen := <-fsJobs
		Assert(t, !fsChOpen || fsJob != nil,
			"received nil job from filesystem: %v %v", fsJob, fsChOpen)
		if fsJob != nil {
			OK(t, fsJob.Error())
		}

		var path string
		fsEntries := 1
		switch j := fsJob.(type) {
		case pipe.Dir:
			path = j.Path()
			fsEntries = len(j.Entries)
		case pipe.Entry:
			path = j.Path()
		}

		// receive tree job
		treeJob, treeChOpen := <-treeJobs
		treeEntries := 1

		OK(t, treeJob.Error)

		if treeJob.Tree != nil {
			treeEntries = len(treeJob.Tree.Nodes)
		}

		Assert(t, fsChOpen == treeChOpen,
			"one channel closed too early: fsChOpen %v, treeChOpen %v",
			fsChOpen, treeChOpen)

		if !fsChOpen || !treeChOpen {
			break
		}

		Assert(t, filepath.Base(path) == filepath.Base(treeJob.Path),
			"paths do not match: %q != %q", filepath.Base(path), filepath.Base(treeJob.Path))

		Assert(t, fsEntries == treeEntries,
			"wrong number of entries: %v != %v", fsEntries, treeEntries)
	}
}
Example no. 8
func (cmd CmdBackup) Execute(args []string) error {
	if len(args) == 0 {
		return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
	}

	target := make([]string, 0, len(args))
	for _, d := range args {
		if a, err := filepath.Abs(d); err == nil {
			d = a
		}
		target = append(target, d)
	}

	target, err := filterExisting(target)
	if err != nil {
		return err
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	var parentSnapshotID *backend.ID

	// use the parent snapshot given on the command line, unless force is set
	if !cmd.Force && cmd.Parent != "" {
		id, err := restic.FindSnapshot(repo, cmd.Parent)
		if err != nil {
			return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
		}

		parentSnapshotID = &id
	}

	// Find last snapshot to set it as parent, if not already set
	if !cmd.Force && parentSnapshotID == nil {
		id, err := findLatestSnapshot(repo, target)
		if err == nil {
			parentSnapshotID = &id
		} else if err != errNoSnapshotFound {
			return err
		}
	}

	if parentSnapshotID != nil {
		cmd.global.Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
	}

	cmd.global.Verbosef("scan %v\n", target)

	selectFilter := func(item string, fi os.FileInfo) bool {
		matched, err := filter.List(cmd.Excludes, item)
		if err != nil {
			cmd.global.Warnf("error for exclude pattern: %v", err)
		}

		if matched {
			debug.Log("backup.Execute", "path %q excluded by a filter", item)
		}

		return !matched
	}

	stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress())
	if err != nil {
		return err
	}

	arch := restic.NewArchiver(repo)
	arch.Excludes = cmd.Excludes
	arch.SelectFilter = selectFilter

	arch.Error = func(dir string, fi os.FileInfo, err error) error {
		// TODO: make ignoring errors configurable
		cmd.global.Warnf("\x1b[2K\rerror for %s: %v\n", dir, err)
		return nil
	}

	_, id, err := arch.Snapshot(cmd.newArchiveProgress(stat), target, parentSnapshotID)
	if err != nil {
		return err
	}

	cmd.global.Verbosef("snapshot %s saved\n", id.Str())

	return nil
}
Example no. 9
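// SnapshotDir creates a snapshot of path in repo, using parent as the parent
// snapshot ID if it is non-nil, and fails the test on any error.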
func SnapshotDir(t testing.TB, repo *repository.Repository, path string, parent *backend.ID) *restic.Snapshot {
	arch := restic.NewArchiver(repo)
	sn, _, err := arch.Snapshot(nil, []string{path}, parent)
	OK(t, err)
	return sn
}