Example #1
func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
	debug.Log("node %v, dir %v, dst %v", node.Name, dir, dst)
	dstPath := filepath.Join(dst, dir, node.Name)

	err := node.CreateAt(dstPath, res.repo)
	if err != nil {
		debug.Log("node.CreateAt(%s) error %v", dstPath, err)
	}

	// Did it fail because of ENOENT?
	if err != nil && os.IsNotExist(errors.Cause(err)) {
		debug.Log("create intermediate paths")

		// Create parent directories and retry
		err = fs.MkdirAll(filepath.Dir(dstPath), 0700)
		if err == nil || os.IsExist(errors.Cause(err)) {
			err = node.CreateAt(dstPath, res.repo)
		}
	}

	if err != nil {
		debug.Log("error %v", err)
		err = res.Error(dstPath, node, err)
		if err != nil {
			return err
		}
	}

	debug.Log("successfully restored %v", node.Name)

	return nil
}
Example #2
func TestRestoreLatest(t *testing.T) {

	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		testRunInit(t, gopts)

		p := filepath.Join(env.testdata, "testfile.c")
		OK(t, os.MkdirAll(filepath.Dir(p), 0755))
		OK(t, appendRandomData(p, 100))

		opts := BackupOptions{}

		testRunBackup(t, []string{env.testdata}, opts, gopts)
		testRunCheck(t, gopts)

		os.Remove(p)
		OK(t, appendRandomData(p, 101))
		testRunBackup(t, []string{env.testdata}, opts, gopts)
		testRunCheck(t, gopts)

		// Restore latest without any filters
		testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore0"), nil, "")
		OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", "testfile.c"), int64(101)))

		// Setup test files in different directories backed up in different snapshots
		p1 := filepath.Join(env.testdata, "p1/testfile.c")
		OK(t, os.MkdirAll(filepath.Dir(p1), 0755))
		OK(t, appendRandomData(p1, 102))
		testRunBackup(t, []string{filepath.Dir(p1)}, opts, gopts)
		testRunCheck(t, gopts)

		p2 := filepath.Join(env.testdata, "p2/testfile.c")
		OK(t, os.MkdirAll(filepath.Dir(p2), 0755))
		OK(t, appendRandomData(p2, 103))
		testRunBackup(t, []string{filepath.Dir(p2)}, opts, gopts)
		testRunCheck(t, gopts)

		p1rAbs := filepath.Join(env.base, "restore1", "p1/testfile.c")
		p2rAbs := filepath.Join(env.base, "restore2", "p2/testfile.c")

		testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore1"), []string{filepath.Dir(p1)}, "")
		OK(t, testFileSize(p1rAbs, int64(102)))
		_, err := os.Stat(p2rAbs)
		Assert(t, os.IsNotExist(errors.Cause(err)),
			"expected %v to not exist in restore, but it exists, err %v", p2rAbs, err)

		testRunRestoreLatest(t, gopts, filepath.Join(env.base, "restore2"), []string{filepath.Dir(p2)}, "")
		OK(t, testFileSize(p2rAbs, int64(103)))
		_, err = os.Stat(p1rAbs)
		Assert(t, os.IsNotExist(errors.Cause(err)),
			"expected %v to not exist in restore, but it exists, err %v", p1rAbs, err)

	})
}
Example #3
// Open opens a file for reading, without updating the atime and without caching data on read.
func Open(name string) (File, error) {
	file, err := os.OpenFile(name, os.O_RDONLY|syscall.O_NOATIME, 0)
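	// O_NOATIME is only allowed if the caller owns the file (or has the
	// corresponding privilege); on a permission error, retry without it.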
	if os.IsPermission(errors.Cause(err)) {
		file, err = os.OpenFile(name, os.O_RDONLY, 0)
	}
	return &nonCachingFile{File: file}, err
}
Example #4
func TestBackupNonExistingFile(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		datafile := filepath.Join("testdata", "backup-data.tar.gz")
		fd, err := os.Open(datafile)
		if os.IsNotExist(errors.Cause(err)) {
			t.Skipf("unable to find data file %q, skipping", datafile)
			return
		}
		OK(t, err)
		OK(t, fd.Close())

		SetupTarTestFixture(t, env.testdata, datafile)

		testRunInit(t, gopts)
		globalOptions.stderr = ioutil.Discard
		defer func() {
			globalOptions.stderr = os.Stderr
		}()

		p := filepath.Join(env.testdata, "0", "0")
		dirs := []string{
			filepath.Join(p, "0"),
			filepath.Join(p, "1"),
			filepath.Join(p, "nonexisting"),
			filepath.Join(p, "5"),
		}

		opts := BackupOptions{}

		testRunBackup(t, dirs, opts, gopts)
	})
}
Example #5
// IsAlreadyLocked returns true iff err is an instance of ErrAlreadyLocked.
func IsAlreadyLocked(err error) bool {
	if _, ok := errors.Cause(err).(ErrAlreadyLocked); ok {
		return true
	}

	return false
}
Example #6
// saveFile reads from rd and saves the blobs in the repository. The list of
// IDs is returned.
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
	blobs = IDs{}
	ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial)

	for {
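		// ch.Next returns io.EOF once the reader is exhausted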
		chunk, err := ch.Next(getBuf())
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			fs.t.Fatalf("unable to save chunk in repo: %v", err)
		}

		id := Hash(chunk.Data)
		if !fs.blobIsKnown(id, DataBlob) {
			_, err := fs.repo.SaveBlob(DataBlob, chunk.Data, id)
			if err != nil {
				fs.t.Fatalf("error saving chunk: %v", err)
			}

			fs.knownBlobs.Insert(id)
		}
		freeBuf(chunk.Data)

		blobs = append(blobs, id)
	}

	return blobs
}
Example #7
func initDebugLogger() {
	debugfile := os.Getenv("DEBUG_LOG")
	if debugfile == "" {
		return
	}

	fmt.Fprintf(os.Stderr, "debug log file %v\n", debugfile)

	f, err := fs.OpenFile(debugfile, os.O_WRONLY|os.O_APPEND, 0600)

	if err == nil {
		_, err = f.Seek(0, io.SeekEnd)
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to seek to the end of %v: %v\n", debugfile, err)
			os.Exit(3)
		}
	}

	if err != nil && os.IsNotExist(errors.Cause(err)) {
		f, err = fs.OpenFile(debugfile, os.O_WRONLY|os.O_CREATE, 0600)
	}

	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to open debug log file: %v\n", err)
		os.Exit(2)
	}

	opts.logger = log.New(f, "", log.LstdFlags)
}
Example #8
// SearchKey tries to decrypt at most maxKeys keys in the backend with the
// given password. If none could be found, ErrNoKeyFound is returned. When
// maxKeys is reached, ErrMaxKeysReached is returned. When setting maxKeys to
// zero, all keys in the repo are checked.
func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
	checked := 0

	// try at most maxKeys keys in the repo (maxKeys == 0 checks all keys)
	done := make(chan struct{})
	defer close(done)
	for name := range s.Backend().List(restic.KeyFile, done) {
		if maxKeys > 0 && checked >= maxKeys {
			return nil, ErrMaxKeysReached
		}
		checked++

		debug.Log("trying key %v", name[:12])
		key, err := OpenKey(s, name, password)
		if err != nil {
			debug.Log("key %v returned error %v", name[:12], err)

			// ErrUnauthenticated means the password is wrong, try the next key
			if errors.Cause(err) == crypto.ErrUnauthenticated {
				continue
			}

			return nil, err
		}

		debug.Log("successfully opened key %v", name[:12])
		return key, nil
	}

	return nil, ErrNoKeyFound
}
Example #9
func main() {
	debug.Log("main %#v", os.Args)
	err := cmdRoot.Execute()

	switch {
	case restic.IsAlreadyLocked(errors.Cause(err)):
		fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
	case errors.IsFatal(errors.Cause(err)):
		fmt.Fprintf(os.Stderr, "%v\n", err)
	case err != nil:
		fmt.Fprintf(os.Stderr, "%+v\n", err)
	}

	RunCleanupHandlers()

	if err != nil {
		os.Exit(1)
	}
}
Example #10
func main() {
	debug.Log("main %#v", os.Args)
	err := cmdRoot.Execute()

	switch {
	case restic.IsAlreadyLocked(errors.Cause(err)):
		fmt.Fprintf(os.Stderr, "%v\nthe `unlock` command can be used to remove stale locks\n", err)
	case errors.IsFatal(errors.Cause(err)):
		fmt.Fprintf(os.Stderr, "%v\n", err)
	case err != nil:
		fmt.Fprintf(os.Stderr, "%+v\n", err)
	}

	var exitCode int
	if err != nil {
		exitCode = 1
	}

	Exit(exitCode)
}
Example #11
// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
func TestLoadNegativeOffset(t testing.TB) {
	b := open(t)
	defer close(t)

	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	err := b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// test reads with negative offsets
	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := -rand.Intn(length + 2000)

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		if len(buf) > -o {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o)
				continue
			}
			err = nil
			buf = buf[:-o]
		}

		if err != nil {
			t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n)
			continue
		}

		p := len(data) + o
		if !bytes.Equal(buf, data[p:p+len(buf)]) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o)
			continue
		}

	}

	test.OK(t, b.Remove(restic.DataFile, id.String()))
}
Example #12
// readPassword reads the password from the given reader directly.
func readPassword(in io.Reader) (password string, err error) {
	buf := make([]byte, 1000)
	n, err := io.ReadFull(in, buf)
	buf = buf[:n]

	if err != nil && errors.Cause(err) != io.ErrUnexpectedEOF {
		return "", errors.Wrap(err, "ReadFull")
	}

	return strings.TrimRight(string(buf), "\r\n"), nil
}
Example #13
// Test returns true if a blob of the given type and name exists in the backend.
func (b *Local) Test(t restic.FileType, name string) (bool, error) {
	debug.Log("Test %v %v", t, name)
	_, err := fs.Stat(filename(b.p, t, name))
	if err != nil {
		if os.IsNotExist(errors.Cause(err)) {
			return false, nil
		}
		return false, errors.Wrap(err, "Stat")
	}

	return true, nil
}
Example #14
func TestBackupDirectoryError(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		datafile := filepath.Join("testdata", "backup-data.tar.gz")
		fd, err := os.Open(datafile)
		if os.IsNotExist(errors.Cause(err)) {
			t.Skipf("unable to find data file %q, skipping", datafile)
			return
		}
		OK(t, err)
		OK(t, fd.Close())

		SetupTarTestFixture(t, env.testdata, datafile)

		testRunInit(t, gopts)

		globalOptions.stderr = ioutil.Discard
		defer func() {
			globalOptions.stderr = os.Stderr
		}()

		ranHook := false

		testdir := filepath.Join(env.testdata, "0", "0", "9")

		// install hook that removes the dir right before readdirnames()
		debug.Hook("pipe.readdirnames", func(context interface{}) {
			path := context.(string)

			if path != testdir {
				return
			}

			t.Logf("in hook, removing test file %v", testdir)
			ranHook = true

			OK(t, os.RemoveAll(testdir))
		})

		testRunBackup(t, []string{filepath.Join(env.testdata, "0", "0")}, BackupOptions{}, gopts)
		testRunCheck(t, gopts)

		Assert(t, ranHook, "hook did not run")
		debug.RemoveHook("pipe.walk2")

		snapshots := testRunList(t, "snapshots", gopts)
		Assert(t, len(snapshots) > 0,
			"no snapshots found in repo (%v)", datafile)

		files := testRunLs(t, gopts, snapshots[0].String())

		Assert(t, len(files) > 1, "snapshot is empty")
	})
}
Example #15
// LoadIndex loads the index id from backend and returns it.
func LoadIndex(repo restic.Repository, id restic.ID) (*Index, error) {
	idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
	if err == nil {
		return idx, nil
	}

	if errors.Cause(err) == ErrOldIndexFormat {
		fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
		return LoadIndexWithDecoder(repo, id, DecodeOldIndex)
	}

	return nil, err
}
Example #16
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
	ch := chunker.New(rd, testPol)

	for {
		chunk, err := ch.Next(buf)
		if errors.Cause(err) == io.EOF {
			break
		}

		// encrypt the chunk, reusing its buffer as the destination
		crypto.Encrypt(key, chunk.Data, chunk.Data)
	}
}
Example #17
func TestRestoreFilter(t *testing.T) {
	testfiles := []struct {
		name string
		size uint
	}{
		{"testfile1.c", 100},
		{"testfile2.exe", 101},
		{"subdir1/subdir2/testfile3.docx", 102},
		{"subdir1/subdir2/testfile4.c", 102},
	}

	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		testRunInit(t, gopts)

		for _, test := range testfiles {
			p := filepath.Join(env.testdata, test.name)
			OK(t, os.MkdirAll(filepath.Dir(p), 0755))
			OK(t, appendRandomData(p, test.size))
		}

		opts := BackupOptions{}

		testRunBackup(t, []string{env.testdata}, opts, gopts)
		testRunCheck(t, gopts)

		snapshotID := testRunList(t, "snapshots", gopts)[0]

		// no restore filter should restore all files
		testRunRestore(t, gopts, filepath.Join(env.base, "restore0"), snapshotID)
		for _, test := range testfiles {
			OK(t, testFileSize(filepath.Join(env.base, "restore0", "testdata", test.name), int64(test.size)))
		}

		for i, pat := range []string{"*.c", "*.exe", "*", "*file3*"} {
			base := filepath.Join(env.base, fmt.Sprintf("restore%d", i+1))
			testRunRestoreExcludes(t, gopts, base, snapshotID, []string{pat})
			for _, test := range testfiles {
				err := testFileSize(filepath.Join(base, "testdata", test.name), int64(test.size))
				if ok, _ := filter.Match(pat, filepath.Base(test.name)); !ok {
					OK(t, err)
				} else {
					Assert(t, os.IsNotExist(errors.Cause(err)),
						"expected %v to not exist in restore step %v, but it exists, err %v", test.name, i+1, err)
				}
			}
		}

	})
}
Example #18
func getRandomData(seed int, size int) []chunker.Chunk {
	buf := Random(seed, size)
	var chunks []chunker.Chunk
	ch := chunker.New(bytes.NewReader(buf), testPol)

	for {
		c, err := ch.Next(nil)
		if errors.Cause(err) == io.EOF {
			break
		}
		chunks = append(chunks, c)
	}

	return chunks
}
Example #19
// List returns the list of entries found in a pack file.
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err error) {
	buf, err := readHeader(rd, size)
	if err != nil {
		return nil, err
	}

	n, err := crypto.Decrypt(k, buf, buf)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	hdrRd := bytes.NewReader(buf)

	pos := uint(0)
	for {
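		// decode fixed-size header entries until the reader is exhausted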
		e := headerEntry{}
		err = binary.Read(hdrRd, binary.LittleEndian, &e)
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			return nil, errors.Wrap(err, "binary.Read")
		}

		entry := restic.Blob{
			Length: uint(e.Length),
			ID:     e.ID,
			Offset: pos,
		}

		switch e.Type {
		case 0:
			entry.Type = restic.DataBlob
		case 1:
			entry.Type = restic.TreeBlob
		default:
			return nil, errors.Errorf("invalid type %d", e.Type)
		}

		entries = append(entries, entry)

		pos += uint(e.Length)
	}

	return entries, nil
}
Example #20
// filterExisting returns a slice of all existing items, or an error if no
// items exist at all.
func filterExisting(items []string) (result []string, err error) {
	for _, item := range items {
		_, err := fs.Lstat(item)
		if err != nil && os.IsNotExist(errors.Cause(err)) {
			continue
		}

		result = append(result, item)
	}

	if len(result) == 0 {
		return nil, errors.Fatal("all target directories/files do not exist")
	}

	return
}
Example #21
// LoadAll reads all data stored in the backend for the handle. The buffer buf
// is resized to accommodate all data in the blob. Errors returned by be.Load()
// are passed on, except io.ErrUnexpectedEOF, which is silenced and nil returned
// instead, since it only means the supplied buffer was larger than the blob.
func LoadAll(be restic.Backend, h restic.Handle, buf []byte) ([]byte, error) {
	fi, err := be.Stat(h)
	if err != nil {
		return nil, errors.Wrap(err, "Stat")
	}

	if fi.Size > int64(len(buf)) {
		buf = make([]byte, int(fi.Size))
	}

	n, err := be.Load(h, buf, 0)
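	// a buffer larger than the blob makes Load return io.ErrUnexpectedEOF once
	// the complete blob has been read; that is expected here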
	if errors.Cause(err) == io.ErrUnexpectedEOF {
		err = nil
	}
	buf = buf[:n]
	return buf, err
}
Example #22
func init() {
	sftpserver := ""

	for _, dir := range strings.Split(TestSFTPPath, ":") {
		testpath := filepath.Join(dir, "sftp-server")
		_, err := os.Stat(testpath)
		if !os.IsNotExist(errors.Cause(err)) {
			sftpserver = testpath
			break
		}
	}

	if sftpserver == "" {
		SkipMessage = "sftp server binary not found, skipping tests"
		return
	}

	args := []string{"-e"}

	test.CreateFn = func() (restic.Backend, error) {
		err := createTempdir()
		if err != nil {
			return nil, err
		}

		return sftp.Create(tempBackendDir, sftpserver, args...)
	}

	test.OpenFn = func() (restic.Backend, error) {
		err := createTempdir()
		if err != nil {
			return nil, err
		}
		return sftp.Open(tempBackendDir, sftpserver, args...)
	}

	test.CleanupFn = func() error {
		if tempBackendDir == "" {
			return nil
		}

		err := os.RemoveAll(tempBackendDir)
		tempBackendDir = ""
		return err
	}
}
Example #23
// Test returns true if a blob of the given type and name exists in the backend.
func (r *SFTP) Test(t restic.FileType, name string) (bool, error) {
	debug.Log("type %v, name %v", t, name)
	if err := r.clientError(); err != nil {
		return false, err
	}

	_, err := r.c.Lstat(r.filename(t, name))
	if os.IsNotExist(errors.Cause(err)) {
		return false, nil
	}

	if err != nil {
		return false, errors.Wrap(err, "Lstat")
	}

	return true, nil
}
Example #24
func mount(opts MountOptions, gopts GlobalOptions, mountpoint string) error {
	debug.Log("start mount")
	defer debug.Log("finish mount")

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	if _, err := resticfs.Stat(mountpoint); os.IsNotExist(errors.Cause(err)) {
		Verbosef("Mountpoint %s doesn't exist, creating it\n", mountpoint)
		err = resticfs.Mkdir(mountpoint, os.ModeDir|0700)
		if err != nil {
			return err
		}
	}
	c, err := systemFuse.Mount(
		mountpoint,
		systemFuse.ReadOnly(),
		systemFuse.FSName("restic"),
	)
	if err != nil {
		return err
	}

	Printf("Now serving the repository at %s\n", mountpoint)
	Printf("Don't forget to umount after quitting!\n")

	root := fs.Tree{}
	root.Add("snapshots", fuse.NewSnapshotsDir(repo, opts.OwnerRoot))

	debug.Log("serving mount at %v", mountpoint)
	err = fs.Serve(c, &root)
	if err != nil {
		return err
	}

	<-c.Ready
	return c.MountError
}
Example #25
func TestBackupMissingFile2(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		datafile := filepath.Join("testdata", "backup-data.tar.gz")
		fd, err := os.Open(datafile)
		if os.IsNotExist(errors.Cause(err)) {
			t.Skipf("unable to find data file %q, skipping", datafile)
			return
		}
		OK(t, err)
		OK(t, fd.Close())

		SetupTarTestFixture(t, env.testdata, datafile)

		testRunInit(t, gopts)

		globalOptions.stderr = ioutil.Discard
		defer func() {
			globalOptions.stderr = os.Stderr
		}()

		ranHook := false
		debug.Hook("pipe.walk2", func(context interface{}) {
			pathname := context.(string)

			if pathname != filepath.Join("testdata", "0", "0", "9", "37") {
				return
			}

			t.Logf("in hook, removing test file testdata/0/0/9/37")
			ranHook = true

			OK(t, os.Remove(filepath.Join(env.testdata, "0", "0", "9", "37")))
		})

		opts := BackupOptions{}

		testRunBackup(t, []string{env.testdata}, opts, gopts)
		testRunCheck(t, gopts)

		Assert(t, ranHook, "hook did not run")
		debug.RemoveHook("pipe.walk2")
	})
}
Example #26
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
	rd.Seek(0, 0)
	ch := chunker.New(rd, testPol)

	for {
		chunk, err := ch.Next(buf)

		if errors.Cause(err) == io.EOF {
			break
		}

		OK(b, err)

		// the chunk length must match the data returned by the chunker
		Assert(b, uint(len(chunk.Data)) == chunk.Length,
			"invalid length: got %d, expected %d", len(chunk.Data), chunk.Length)

		_, err = crypto.Encrypt(key, buf2, chunk.Data)
		OK(b, err)
	}
}
Example #27
// SaveFile stores the content of the file on the backend as a Blob by calling
// Save for each chunk.
func (arch *Archiver) SaveFile(p *restic.Progress, node *restic.Node) (*restic.Node, error) {
	file, err := fs.Open(node.Path)
	if err != nil {
		return node, errors.Wrap(err, "Open")
	}
	defer file.Close()

	debug.RunHook("archiver.SaveFile", node.Path)

	node, err = arch.reloadFileIfChanged(node, file)
	if err != nil {
		return node, err
	}

	chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial)
	resultChannels := [](<-chan saveResult){}

	for {
		chunk, err := chnker.Next(getBuf())
		if errors.Cause(err) == io.EOF {
			break
		}

		if err != nil {
			return node, errors.Wrap(err, "chunker.Next")
		}

		resCh := make(chan saveResult, 1)
		go arch.saveChunk(chunk, p, <-arch.blobToken, file, resCh)
		resultChannels = append(resultChannels, resCh)
	}

	results, err := waitForResults(resultChannels)
	if err != nil {
		return node, err
	}
	err = updateNodeContent(node, results)

	return node, err
}
Example #28
// TestLoad tests the backend's Load function.
func TestLoad(t testing.TB) {
	b := open(t)
	defer close(t)

	_, err := b.Load(restic.Handle{}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	_, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	err = b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		if l > len(d) {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
			}
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
				len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	// test with negative offset
	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[len(d)-o:]
		} else {
			o = 0
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, -int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		if l > len(d) {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
				continue
			}
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
				len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	// load with a too-large buffer; this should return io.ErrUnexpectedEOF
	buf := make([]byte, length+100)
	n, err := b.Load(handle, buf, 0)
	if n != length {
		t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
	}

	if errors.Cause(err) != io.ErrUnexpectedEOF {
		t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
	}

	test.OK(t, b.Remove(restic.DataFile, id.String()))
}
Example #29
// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
func Repack(repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet) (err error) {
	debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

	buf := make([]byte, 0, maxPackSize)
	for packID := range packs {
		// load the complete pack
		h := restic.Handle{Type: restic.DataFile, Name: packID.String()}

		l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
		if errors.Cause(err) == io.ErrUnexpectedEOF {
			err = nil
			buf = buf[:l]
		}

		if err != nil {
			return err
		}

		debug.Log("pack %v loaded (%d bytes)", packID.Str(), len(buf))

		blobs, err := pack.List(repo.Key(), bytes.NewReader(buf), int64(len(buf)))
		if err != nil {
			return err
		}

		debug.Log("processing pack %v, blobs: %v", packID.Str(), len(blobs))
		var plaintext []byte
		for _, entry := range blobs {
			h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
			if !keepBlobs.Has(h) {
				continue
			}

			debug.Log("  process blob %v", h)

			ciphertext := buf[entry.Offset : entry.Offset+entry.Length]

			// reuse the plaintext buffer when it is large enough, otherwise grow it
			plaintext = plaintext[:cap(plaintext)]
			if len(plaintext) < len(ciphertext) {
				plaintext = make([]byte, len(ciphertext))
			}

			debug.Log("  ciphertext %d, plaintext %d", len(ciphertext), len(plaintext))

			n, err := crypto.Decrypt(repo.Key(), plaintext, ciphertext)
			if err != nil {
				return err
			}
			plaintext = plaintext[:n]

			_, err = repo.SaveBlob(entry.Type, plaintext, entry.ID)
			if err != nil {
				return err
			}

			debug.Log("  saved blob %v", entry.ID.Str())

			keepBlobs.Delete(h)
		}
	}

	if err := repo.Flush(); err != nil {
		return err
	}

	for packID := range packs {
		err := repo.Backend().Remove(restic.DataFile, packID.String())
		if err != nil {
			debug.Log("error removing pack %v: %v", packID.Str(), err)
			return err
		}
		debug.Log("removed pack %v", packID.Str())
	}

	return nil
}
Example #30
func TestBackup(t *testing.T) {
	withTestEnvironment(t, func(env *testEnvironment, gopts GlobalOptions) {
		datafile := filepath.Join("testdata", "backup-data.tar.gz")
		fd, err := os.Open(datafile)
		if os.IsNotExist(errors.Cause(err)) {
			t.Skipf("unable to find data file %q, skipping", datafile)
			return
		}
		OK(t, err)
		OK(t, fd.Close())

		testRunInit(t, gopts)

		SetupTarTestFixture(t, env.testdata, datafile)
		opts := BackupOptions{}

		// first backup
		testRunBackup(t, []string{env.testdata}, opts, gopts)
		snapshotIDs := testRunList(t, "snapshots", gopts)
		Assert(t, len(snapshotIDs) == 1,
			"expected one snapshot, got %v", snapshotIDs)

		testRunCheck(t, gopts)
		stat1 := dirStats(env.repo)

		// second backup, implicit incremental
		testRunBackup(t, []string{env.testdata}, opts, gopts)
		snapshotIDs = testRunList(t, "snapshots", gopts)
		Assert(t, len(snapshotIDs) == 2,
			"expected two snapshots, got %v", snapshotIDs)

		stat2 := dirStats(env.repo)
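		// backing up identical data again should add almost nothing; allow 10% slack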
		if stat2.size > stat1.size+stat1.size/10 {
			t.Error("repository size has grown by more than 10 percent")
		}
		t.Logf("repository grown by %d bytes", stat2.size-stat1.size)

		testRunCheck(t, gopts)
		// third backup, explicit incremental
		opts.Parent = snapshotIDs[0].String()
		testRunBackup(t, []string{env.testdata}, opts, gopts)
		snapshotIDs = testRunList(t, "snapshots", gopts)
		Assert(t, len(snapshotIDs) == 3,
			"expected three snapshots, got %v", snapshotIDs)

		stat3 := dirStats(env.repo)
		if stat3.size > stat1.size+stat1.size/10 {
			t.Error("repository size has grown by more than 10 percent")
		}
		t.Logf("repository grown by %d bytes", stat3.size-stat2.size)

		// restore all backups and compare
		for i, snapshotID := range snapshotIDs {
			restoredir := filepath.Join(env.base, fmt.Sprintf("restore%d", i))
			t.Logf("restoring snapshot %v to %v", snapshotID.Str(), restoredir)
			testRunRestore(t, gopts, restoredir, snapshotID)
			Assert(t, directoriesEqualContents(env.testdata, filepath.Join(restoredir, "testdata")),
				"directories are not equal")
		}

		testRunCheck(t, gopts)
	})
}