Code example #1
File: index.go Project: tharrisone/restic
// DecodeIndex loads and deserializes an index from rd.
func DecodeIndex(rd io.Reader) (*Index, error) {
	debug.Log("Index.DecodeIndex", "Start decoding index")
	list := []*packJSON{}

	dec := json.NewDecoder(rd)
	err := dec.Decode(&list)
	if err != nil {
		return nil, err
	}

	idx := NewIndex()
	for _, pack := range list {
		packID, err := backend.ParseID(pack.ID)
		if err != nil {
			debug.Log("Index.DecodeIndex", "error parsing pack ID %q: %v", pack.ID, err)
			return nil, err
		}

		for _, blob := range pack.Blobs {
			blobID, err := backend.ParseID(blob.ID)
			if err != nil {
				debug.Log("Index.DecodeIndex", "error parsing blob ID %q: %v", blob.ID, err)
				return nil, err
			}

			idx.store(blob.Type, blobID, packID, blob.Offset, blob.Length, true)
		}
	}

	debug.Log("Index.DecodeIndex", "done")
	return idx, nil
}
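The packJSON records decoded above (and the per-blob entries they contain) are not declared anywhere on this page. As an orientation aid only, here is a minimal sketch of a shape consistent with the fields the loop reads; the blobJSON name, the field types, and the JSON tags are assumptions, not the project's actual declarations.

// Hypothetical sketch: structs consistent with the fields DecodeIndex reads.
// Names, field types, and JSON tags are assumptions.
type blobJSON struct {
	ID     string        `json:"id"`
	Type   pack.BlobType `json:"type"`
	Offset uint          `json:"offset"`
	Length uint          `json:"length"`
}

type packJSON struct {
	ID    string     `json:"id"`
	Blobs []blobJSON `json:"blobs"`
}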
Code example #2
File: walk_test.go Project: marete/restic
func TestDelayedWalkTree(t *testing.T) {
	WithTestEnvironment(t, repoFixture, func(repodir string) {
		repo := OpenLocalRepo(t, repodir)
		OK(t, repo.LoadIndex())

		root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
		OK(t, err)

		dr := delayRepo{repo, 100 * time.Millisecond}

		// start tree walker
		treeJobs := make(chan restic.WalkTreeJob)
		go restic.WalkTree(dr, root, nil, treeJobs)

		i := 0
		for job := range treeJobs {
			expectedPath := filepath.Join(strings.Split(walktreeTestItems[i], "/")...)
			if job.Path != expectedPath {
				t.Fatalf("expected path %q (%v), got %q", walktreeTestItems[i], i, job.Path)
			}
			i++
		}

		if i != len(walktreeTestItems) {
			t.Fatalf("got %d items, expected %v", i, len(walktreeTestItems))
		}
	})
}
Code example #3
File: index.go Project: rawtaz/restic
// Each returns a channel that yields all blobs known to the index. When the
// done channel is closed, the background goroutine terminates. While the
// channel is being drained, the index is locked and cannot be modified.
func (idx *Index) Each(done chan struct{}) <-chan pack.Blob {
	idx.m.Lock()

	ch := make(chan pack.Blob)

	go func() {
		defer idx.m.Unlock()
		defer func() {
			close(ch)
		}()

		for ids, blob := range idx.pack {
			id, err := backend.ParseID(ids)
			if err != nil {
				// ignore invalid IDs
				continue
			}

			select {
			case <-done:
				return
			case ch <- pack.Blob{
				ID:     id,
				Offset: blob.offset,
				Type:   blob.tpe,
				Length: uint32(blob.length),
			}:
			}
		}
	}()

	return ch
}
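A minimal consumer of this API, following the same done-channel pattern used by Checker.LoadIndex in code examples #17 and #19 below. The helper name and the filter on pack.Tree are illustrative only, not part of the project.

// listTreeBlobs is a hypothetical in-package helper: collect the IDs of all
// tree blobs known to the index. Closing done releases the background
// goroutine (and with it the index lock) even if we return early.
func listTreeBlobs(idx *Index) backend.IDs {
	done := make(chan struct{})
	defer close(done)

	var ids backend.IDs
	for blob := range idx.Each(done) {
		if blob.Type == pack.Tree {
			ids = append(ids, blob.ID)
		}
	}
	return ids
}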
Code example #4
File: helpers.go Project: rawtaz/restic
func ParseID(s string) backend.ID {
	id, err := backend.ParseID(s)
	if err != nil {
		panic(err)
	}

	return id
}
Code example #5
File: snapshot.go Project: tharrisone/restic
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) {
	// find snapshot id with prefix
	name, err := backend.Find(repo.Backend(), backend.Snapshot, s)
	if err != nil {
		return nil, err
	}

	return backend.ParseID(name)
}
Code example #6
File: parallel.go Project: marete/restic
// ParallelWorkFuncParseID converts a function that takes a backend.ID to a
// function that takes a string.
func ParallelWorkFuncParseID(f ParallelIDWorkFunc) ParallelWorkFunc {
	return func(s string, done <-chan struct{}) error {
		id, err := backend.ParseID(s)
		if err != nil {
			return err
		}

		return f(id, done)
	}
}
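Code example #19 below uses this adapter in exactly this way; here is a condensed, hedged sketch of the same call. The function name, the no-op worker body, and the defaultParallelism constant are stand-ins.

// loadSnapshotsExample is a hypothetical sketch: an ID-based worker wrapped
// with ParallelWorkFuncParseID so FilesInParallel, which yields file names as
// strings, can drive it.
func loadSnapshotsExample(repo *repository.Repository) error {
	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("example.worker", "processing %v", id.Str())
		return nil
	}

	return repository.FilesInParallel(repo.Backend(), backend.Snapshot,
		defaultParallelism, repository.ParallelWorkFuncParseID(worker))
}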
Code example #7
File: id_test.go Project: marete/restic
func TestID(t *testing.T) {
	for _, test := range TestStrings {
		id, err := backend.ParseID(test.id)
		OK(t, err)

		id2, err := backend.ParseID(test.id)
		OK(t, err)
		Assert(t, id.Equal(id2), "ID.Equal() does not work as expected")

		ret, err := id.EqualString(test.id)
		OK(t, err)
		Assert(t, ret, "ID.EqualString() returned wrong value")

		// test json marshalling
		buf, err := id.MarshalJSON()
		OK(t, err)
		Equals(t, "\""+test.id+"\"", string(buf))

		var id3 backend.ID
		err = id3.UnmarshalJSON(buf)
		OK(t, err)
		Equals(t, id, id3)
	}
}
Code example #8
File: integration_test.go Project: rawtaz/restic
func parseIDsFromReader(t testing.TB, rd io.Reader) backend.IDs {
	IDs := backend.IDs{}
	sc := bufio.NewScanner(rd)

	for sc.Scan() {
		id, err := backend.ParseID(sc.Text())
		if err != nil {
			t.Logf("parse id %v: %v", sc.Text(), err)
			continue
		}

		IDs = append(IDs, id)
	}

	return IDs
}
Code example #9
File: cache.go Project: rawtaz/restic
func (c *Cache) list(t backend.Type) ([]cacheEntry, error) {
	var dir string

	switch t {
	case backend.Snapshot:
		dir = filepath.Join(c.base, "snapshots")
	default:
		return nil, fmt.Errorf("cache not supported for type %v", t)
	}

	fd, err := os.Open(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return []cacheEntry{}, nil
		}
		return nil, err
	}
	defer fd.Close()

	fis, err := fd.Readdir(-1)
	if err != nil {
		return nil, err
	}

	entries := make([]cacheEntry, 0, len(fis))

	for _, fi := range fis {
		parts := strings.SplitN(fi.Name(), ".", 2)

		id, err := backend.ParseID(parts[0])
		// ignore invalid cache entries for now
		if err != nil {
			debug.Log("Cache.List", "unable to parse name %v as id: %v", parts[0], err)
			continue
		}

		e := cacheEntry{ID: id}

		if len(parts) == 2 {
			e.Subtype = parts[1]
		}

		entries = append(entries, e)
	}

	return entries, nil
}
Code example #10
File: checker.go Project: hzensne1/restic
// loadSnapshotTreeIDs loads all snapshots from the backend and returns their tree IDs.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
	var trees struct {
		IDs backend.IDs
		sync.Mutex
	}

	var errs struct {
		errs []error
		sync.Mutex
	}

	snapshotWorker := func(strID string, done <-chan struct{}) error {
		id, err := backend.ParseID(strID)
		if err != nil {
			return err
		}

		debug.Log("Checker.Snapshots", "load snapshot %v", id.Str())

		treeID, err := loadTreeFromSnapshot(repo, id)
		if err != nil {
			errs.Lock()
			errs.errs = append(errs.errs, err)
			errs.Unlock()
			return nil
		}

		debug.Log("Checker.Snapshots", "snapshot %v has tree %v", id.Str(), treeID.Str())
		trees.Lock()
		trees.IDs = append(trees.IDs, treeID)
		trees.Unlock()

		return nil
	}

	err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
	if err != nil {
		errs.errs = append(errs.errs, err)
	}

	return trees.IDs, errs.errs
}
Code example #11
File: walk_test.go Project: marete/restic
func BenchmarkDelayedWalkTree(t *testing.B) {
	WithTestEnvironment(t, repoFixture, func(repodir string) {
		repo := OpenLocalRepo(t, repodir)
		OK(t, repo.LoadIndex())

		root, err := backend.ParseID("937a2f64f736c64ee700c6ab06f840c68c94799c288146a0e81e07f4c94254da")
		OK(t, err)

		dr := delayRepo{repo, 10 * time.Millisecond}

		t.ResetTimer()

		for i := 0; i < t.N; i++ {
			// start tree walker
			treeJobs := make(chan restic.WalkTreeJob)
			go restic.WalkTree(dr, root, nil, treeJobs)

			for range treeJobs {
			}
		}
	})
}
Code example #12
File: index.go Project: jhautefeuille/restic
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo *Repository, id string, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])

	idxID, err := backend.ParseID(id)
	if err != nil {
		return nil, err
	}

	buf, err := repo.LoadAndDecrypt(backend.Index, idxID)
	if err != nil {
		return nil, err
	}

	idx, err = fn(bytes.NewReader(buf))
	if err != nil {
		debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = idxID

	return idx, nil
}
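For reference, the plain LoadIndex called from code example #17 is plausibly nothing more than this wrapper; its real body does not appear on this page, so treat the sketch as an assumption.

// Hypothetical sketch: LoadIndex as a thin wrapper that plugs the current
// decoder (code example #1) into LoadIndexWithDecoder.
func LoadIndex(repo *Repository, id string) (*Index, error) {
	return LoadIndexWithDecoder(repo, id, DecodeIndex)
}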
Code example #13
File: index.go Project: marete/restic
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo *Repository, id string, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])

	idxID, err := backend.ParseID(id)
	if err != nil {
		return nil, err
	}

	rd, err := repo.GetDecryptReader(backend.Index, idxID.String())
	if err != nil {
		return nil, err
	}
	defer closeOrErr(rd, &err)

	idx, err = fn(rd)
	if err != nil {
		debug.Log("LoadIndexWithDecoder", "error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = idxID

	return idx, nil
}
Code example #14
File: repository.go Project: jhautefeuille/restic
func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
	defer close(out)
	in := r.be.List(t, done)

	var (
		// disable sending on outCh until we have received an ID
		outCh chan<- backend.ID
		// enable receiving from in
		inCh = in
		id   backend.ID
		err  error
	)

	for {
		select {
		case <-done:
			return
		case strID, ok := <-inCh:
			if !ok {
				// input channel closed, we're done
				return
			}
			id, err = backend.ParseID(strID)
			if err != nil {
				// ignore invalid IDs
				continue
			}

			inCh = nil
			outCh = out
		case outCh <- id:
			outCh = nil
			inCh = in
		}
	}
}
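The select above leans on the rule that sending to or receiving from a nil channel blocks forever: nil-ing inCh while an ID is pending, and nil-ing outCh after it has been sent, disables one case at a time so every parsed ID is delivered exactly once. Below is a hedged in-package sketch of how the method might be driven; the helper name is illustrative.

// listSnapshotIDs is a hypothetical helper that drains list into a slice.
// Closing done lets the caller abandon the listing early.
func (r *Repository) listSnapshotIDs(done <-chan struct{}) backend.IDs {
	out := make(chan backend.ID)
	go r.list(backend.Snapshot, done, out)

	var ids backend.IDs
	for id := range out {
		ids = append(ids, id)
	}
	return ids
}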
Code example #15
File: backend_test.go Project: rawtaz/restic
func testBackend(b backend.Backend, t *testing.T) {
	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to open not existing blob
			_, err = b.Get(tpe, id.String())
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range TestStrings {
			// store string in backend
			blob, err := b.Create()
			OK(t, err)

			_, err = blob.Write([]byte(test.data))
			OK(t, err)
			OK(t, blob.Finalize(tpe, test.id))

			// try to get it out again
			rd, err := b.Get(tpe, test.id)
			OK(t, err)
			Assert(t, rd != nil, "Get() returned nil")

			buf, err := ioutil.ReadAll(rd)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// compare content
			Equals(t, test.data, string(buf))
		}

		// test adding the first file again
		test := TestStrings[0]

		// create blob
		blob, err := b.Create()
		OK(t, err)

		_, err = blob.Write([]byte(test.data))
		OK(t, err)
		err = blob.Finalize(tpe, test.id)
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// create blob
		blob, err = b.Create()
		OK(t, err)

		_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
		OK(t, err)
		OK(t, blob.Finalize(tpe, test.id))

		// list items
		IDs := backend.IDs{}

		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		sort.Sort(IDs)

		i := 0
		for s := range b.List(tpe, nil) {
			Equals(t, IDs[i].String(), s)
			i++
		}

		// remove content if requested
		if TestCleanup {
			for _, test := range TestStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, found, fmt.Sprintf("id %q was not found before removal", id))

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
			}
		}

	}
}
Code example #16
File: backend_test.go Project: marete/restic
func testBackend(b backend.Backend, t *testing.T) {
	testBackendConfig(b, t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to open not existing blob
			_, err = b.Get(tpe, id.String())
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.GetReader(tpe, id.String(), 0, 1)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range TestStrings {
			// store string in backend
			blob, err := b.Create()
			OK(t, err)

			_, err = blob.Write([]byte(test.data))
			OK(t, err)
			OK(t, blob.Finalize(tpe, test.id))

			// try to get it out again
			rd, err := b.Get(tpe, test.id)
			OK(t, err)
			Assert(t, rd != nil, "Get() returned nil")

			// try to read it out again
			reader, err := b.GetReader(tpe, test.id, 0, uint(len(test.data)))
			OK(t, err)
			Assert(t, reader != nil, "GetReader() returned nil")
			buf2 := make([]byte, len(test.data))
			_, err = io.ReadFull(reader, buf2)
			OK(t, err)
			Assert(t, test.data == string(buf2), "Read() returned different content")

			// try to read it out with an offset and a length
			readerOffLen, err := b.GetReader(tpe, test.id, 1, uint(len(test.data)-2))
			OK(t, err)
			Assert(t, readerOffLen != nil, "GetReader() returned nil")
			bytesOffLen := make([]byte, len(test.data)-2)
			_, err = io.ReadFull(readerOffLen, bytesOffLen)
			OK(t, err)
			Assert(t, test.data[1:len(test.data)-1] == string(bytesOffLen), "Read() with offset and length returned different content")

			buf, err := ioutil.ReadAll(rd)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// compare content
			Equals(t, test.data, string(buf))
		}

		// test adding the first file again
		test := TestStrings[0]

		// create blob
		blob, err := b.Create()
		OK(t, err)

		_, err = blob.Write([]byte(test.data))
		OK(t, err)
		err = blob.Finalize(tpe, test.id)
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		blob, err = b.Create()
		OK(t, err)

		_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
		OK(t, err)
		OK(t, blob.Finalize(tpe, test.id))

		// list items
		IDs := backend.IDs{}

		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		sort.Sort(IDs)

		i := 0
		for s := range b.List(tpe, nil) {
			Equals(t, IDs[i].String(), s)
			i++
		}

		// remove content if requested
		if TestCleanup {
			for _, test := range TestStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, found, fmt.Sprintf("id %q was not found before removal", id))

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
			}
		}

	}
}
Code example #17
File: checker.go Project: hzensne1/restic
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() error {
	debug.Log("LoadIndex", "Start")
	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	worker := func(id string, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndex(c.repo, id)
		if err != nil {
			return err
		}

		select {
		case indexCh <- indexRes{Index: idx, ID: id}:
		case <-done:
		}

		return nil
	}

	var perr error
	go func() {
		defer close(indexCh)
		debug.Log("LoadIndex", "start loading indexes in parallel")
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, worker)
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)
		id, err := backend.ParseID(res.ID)
		if err != nil {
			return err
		}

		c.indexes[id] = res.Index
		c.masterIndex.Merge(res.Index)

		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs[blob.PackID] = struct{}{}
			c.blobs[blob.ID] = struct{}{}
			c.blobRefs.M[blob.ID] = 0
			cnt++
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	c.repo.SetIndex(c.masterIndex)

	return perr
}
Code example #18
File: backend_test.go Project: aut0/restic
func testBackend(b backend.Backend, t *testing.T) {
	testBackendConfig(b, t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to open not existing blob
			_, err = b.Get(tpe, id.String())
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.GetReader(tpe, id.String(), 0, 1)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range TestStrings {
			store(t, b, tpe, []byte(test.data))

			// test Get()
			rd, err := b.Get(tpe, test.id)
			OK(t, err)
			Assert(t, rd != nil, "Get() returned nil")

			read(t, rd, []byte(test.data))
			OK(t, rd.Close())

			// test GetReader()
			rd, err = b.GetReader(tpe, test.id, 0, uint(len(test.data)))
			OK(t, err)
			Assert(t, rd != nil, "GetReader() returned nil")

			read(t, rd, []byte(test.data))
			OK(t, rd.Close())

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start
			rd, err = b.GetReader(tpe, test.id, uint(start), uint(length))
			OK(t, err)
			Assert(t, rd != nil, "GetReader() returned nil")

			read(t, rd, []byte(test.data[start:end]))
			OK(t, rd.Close())
		}

		// test adding the first file again
		test := TestStrings[0]

		// create blob
		blob, err := b.Create()
		OK(t, err)

		_, err = blob.Write([]byte(test.data))
		OK(t, err)
		err = blob.Finalize(tpe, test.id)
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		blob, err = b.Create()
		OK(t, err)

		_, err = io.Copy(blob, bytes.NewReader([]byte(test.data)))
		OK(t, err)
		OK(t, blob.Finalize(tpe, test.id))

		// list items
		IDs := backend.IDs{}

		for _, test := range TestStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		sort.Sort(IDs)

		i := 0
		for s := range b.List(tpe, nil) {
			Equals(t, IDs[i].String(), s)
			i++
		}

		// remove content if requested
		if TestCleanup {
			for _, test := range TestStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
			}
		}
	}

	testGetReader(b, t)
	testWrite(b, t)
}
Code example #19
File: checker.go Project: jhautefeuille/restic
// LoadIndex loads all index files.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
	debug.Log("LoadIndex", "Start")
	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeIndex)
		if err == repository.ErrOldIndexFormat {
			debug.Log("LoadIndex", "index %v has old format", id.Str())
			hints = append(hints, ErrOldIndexFormat{id})

			idx, err = repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeOldIndex)
		}

		if err != nil {
			return err
		}

		select {
		case indexCh <- indexRes{Index: idx, ID: id.String()}:
		case <-done:
		}

		return nil
	}

	var perr error
	go func() {
		defer close(indexCh)
		debug.Log("LoadIndex", "start loading indexes in parallel")
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism,
			repository.ParallelWorkFuncParseID(worker))
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	if perr != nil {
		errs = append(errs, perr)
		return hints, errs
	}

	packToIndex := make(map[backend.ID]backend.IDSet)

	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)
		idxID, err := backend.ParseID(res.ID)
		if err != nil {
			errs = append(errs, fmt.Errorf("unable to parse as index ID: %v", res.ID))
			continue
		}

		c.indexes[idxID] = res.Index
		c.masterIndex.Insert(res.Index)

		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs[blob.PackID] = struct{}{}
			c.blobs[blob.ID] = struct{}{}
			c.blobRefs.M[blob.ID] = 0
			cnt++

			if _, ok := packToIndex[blob.PackID]; !ok {
				packToIndex[blob.PackID] = backend.NewIDSet()
			}
			packToIndex[blob.PackID].Insert(idxID)
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	debug.Log("LoadIndex", "checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("LoadIndex", "  check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}

	c.repo.SetIndex(c.masterIndex)

	return hints, errs
}
Code example #20
File: cmd_cat.go Project: marete/restic
func (cmd CmdCat) Execute(args []string) error {
	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
		return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tpe := args[0]

	var id backend.ID
	if tpe != "masterkey" && tpe != "config" {
		id, err = backend.ParseID(args[1])
		if err != nil {
			if tpe != "snapshot" {
				return err
			}

			// find snapshot id with prefix
			id, err = restic.FindSnapshot(repo, args[1])
			if err != nil {
				return err
			}
		}
	}

	// handle all types that don't need an index
	switch tpe {
	case "config":
		buf, err := json.MarshalIndent(repo.Config, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "index":
		buf, err := repo.LoadAndDecrypt(backend.Index, id)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	case "snapshot":
		sn := &restic.Snapshot{}
		err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&sn, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	case "key":
		rd, err := repo.Backend().Get(backend.Key, id.String())
		if err != nil {
			return err
		}

		dec := json.NewDecoder(rd)

		var key repository.Key
		err = dec.Decode(&key)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&key, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "masterkey":
		buf, err := json.MarshalIndent(repo.Key(), "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "lock":
		lock, err := restic.LoadLock(repo, id)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&lock, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	}

	// load index, handle all the other types
	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	switch tpe {
	case "pack":
		rd, err := repo.Backend().Get(backend.Data, id.String())
		if err != nil {
			return err
		}

		_, err = io.Copy(os.Stdout, rd)
		return err

	case "blob":
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		buf := make([]byte, blob.Length)
		data, err := repo.LoadBlob(blob.Type, id, buf)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(data)
		return err

	case "tree":
		debug.Log("cat", "cat tree %v", id.Str())
		tree := restic.NewTree()
		err = repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
			return err
		}

		buf, err := json.MarshalIndent(&tree, "", "  ")
		if err != nil {
			debug.Log("cat", "error json.MarshalIndent(): %v", err)
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	default:
		return errors.New("invalid type")
	}
}
Code example #21
File: tests.go Project: jhautefeuille/restic
// TestBackend tests all functions of the backend.
func TestBackend(t testing.TB) {
	b := open(t)
	defer close(t)

	for _, tpe := range []backend.Type{
		backend.Data, backend.Key, backend.Lock,
		backend.Snapshot, backend.Index,
	} {
		// detect non-existing files
		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)

			// test if blob is already in repository
			ret, err := b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "blob was found to exist before creating")

			// try to stat a not existing blob
			h := backend.Handle{Type: tpe, Name: id.String()}
			_, err = b.Stat(h)
			Assert(t, err != nil, "blob data could be extracted before creation")

			// try to read not existing blob
			_, err = b.Load(h, nil, 0)
			Assert(t, err != nil, "blob reader could be obtained before creation")

			// try to get string out, should fail
			ret, err = b.Test(tpe, id.String())
			OK(t, err)
			Assert(t, !ret, "id %q was found (but should not have)", test.id)
		}

		// add files
		for _, test := range testStrings {
			store(t, b, tpe, []byte(test.data))

			// test Load()
			h := backend.Handle{Type: tpe, Name: test.id}
			buf, err := backend.LoadAll(b, h, nil)
			OK(t, err)
			Equals(t, test.data, string(buf))

			// try to read it out with an offset and a length
			start := 1
			end := len(test.data) - 2
			length := end - start

			buf2 := make([]byte, length)
			n, err := b.Load(h, buf2, int64(start))
			OK(t, err)
			Equals(t, length, n)
			Equals(t, test.data[start:end], string(buf2))
		}

		// test adding the first file again
		test := testStrings[0]

		// create blob
		err := b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		Assert(t, err != nil, "expected error, got %v", err)

		// remove and recreate
		err = b.Remove(tpe, test.id)
		OK(t, err)

		// test that the blob is gone
		ok, err := b.Test(tpe, test.id)
		OK(t, err)
		Assert(t, ok == false, "removed blob still present")

		// create blob
		err = b.Save(backend.Handle{Type: tpe, Name: test.id}, []byte(test.data))
		OK(t, err)

		// list items
		IDs := backend.IDs{}

		for _, test := range testStrings {
			id, err := backend.ParseID(test.id)
			OK(t, err)
			IDs = append(IDs, id)
		}

		list := backend.IDs{}

		for s := range b.List(tpe, nil) {
			list = append(list, ParseID(s))
		}

		if len(IDs) != len(list) {
			t.Fatalf("wrong number of IDs returned: want %d, got %d", len(IDs), len(list))
		}

		sort.Sort(IDs)
		sort.Sort(list)

		if !reflect.DeepEqual(IDs, list) {
			t.Fatalf("lists aren't equal, want:\n  %v\n  got:\n%v\n", IDs, list)
		}

		// remove content if requested
		if TestCleanupTempDirs {
			for _, test := range testStrings {
				id, err := backend.ParseID(test.id)
				OK(t, err)

				found, err := b.Test(tpe, id.String())
				OK(t, err)

				OK(t, b.Remove(tpe, id.String()))

				found, err = b.Test(tpe, id.String())
				OK(t, err)
				Assert(t, !found, fmt.Sprintf("id %q not found after removal", id))
			}
		}
	}
}