Example #1
// WalkTree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("WalkTree", "start on %v, start workers", id.Str())

	load := func(id backend.ID) (*Tree, error) {
		tree := &Tree{}
		err := repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("WalkTree", "done")
}
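
A minimal sketch of how a caller might drive WalkTree and drain the resulting WalkTreeJob values. The import paths follow the old restic repository layout and are assumptions, as is the assumption that *repository.Repository satisfies the TreeLoader interface (it provides LoadJSONPack, as used in Example #27); the Path, Error and Tree fields of WalkTreeJob are taken from Example #3.

// This sketch is assumed to live in its own package; the restic import
// paths below follow the old repository layout and are assumptions.
import (
	"fmt"

	"github.com/restic/restic"            // assumed: root package containing WalkTree
	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// walkTreeExample drives WalkTree and prints every path it reports.
func walkTreeExample(repo *repository.Repository, treeID backend.ID) {
	done := make(chan struct{})
	defer close(done) // closing done would stop an in-flight walk early

	jobCh := make(chan restic.WalkTreeJob)
	go restic.WalkTree(repo, treeID, done, jobCh)

	// WalkTree closes jobCh once the walk is finished, so ranging terminates.
	for job := range jobCh {
		if job.Error != nil {
			fmt.Printf("error at %v: %v\n", job.Path, job.Error)
			continue
		}
		fmt.Printf("visited %v\n", job.Path)
	}
}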
Example #2
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	buf := make([]byte, 0, blob.PlaintextLength())
	buf, err = src.LoadBlob(blob.Type, id, buf)
	if err != nil {
		return err
	}

	if uint(len(buf)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
	if err != nil {
		return err
	}

	return nil
}
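
A minimal sketch of repacking a whole set of blobs with repackBlob from Example #2. It assumes this helper is compiled into the same package as repackBlob, and the old restic import paths below are assumptions.

import (
	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// repackBlobs repacks every listed blob from src into dst, stopping at the
// first error.
func repackBlobs(src, dst *repository.Repository, ids []backend.ID) error {
	for _, id := range ids {
		if err := repackBlob(src, dst, id); err != nil {
			return err
		}
	}
	return nil
}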
Example #3
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
	debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
	defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)

	resCh := make(chan loadTreeResult, 1)
	tw.ch <- loadTreeJob{
		id:  id,
		res: resCh,
	}

	res := <-resCh
	if res.err != nil {
		select {
		case tw.out <- WalkTreeJob{Path: path, Error: res.err}:
		case <-done:
			return
		}
		return
	}

	tw.walk(path, res.tree, done)

	select {
	case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}:
	case <-done:
		return
	}
}
Example #4
func TestLockRefresh(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	lock, err := restic.NewLock(repo)
	OK(t, err)

	var lockID *backend.ID
	for id := range repo.List(backend.Lock, nil) {
		if lockID != nil {
			t.Error("more than one lock found")
		}
		lockID = &id
	}

	OK(t, lock.Refresh())

	var lockID2 *backend.ID
	for id := range repo.List(backend.Lock, nil) {
		if lockID2 != nil {
			t.Error("more than one lock found")
		}
		lockID2 = &id
	}

	Assert(t, !lockID.Equal(*lockID2),
		"expected a new ID after lock refresh, got the same")
	OK(t, lock.Unlock())
}
Example #5
func cmdBackupExcludes(t testing.TB, global GlobalOptions, target []string, parentID *backend.ID, excludes []string) {
	cmd := &CmdBackup{global: &global, Excludes: excludes}
	if parentID != nil {
		cmd.Parent = parentID.String()
	}

	t.Logf("backing up %v", target)

	OK(t, cmd.Execute(target))
}
Example #6
// ListPack returns the list of blobs saved in the pack id.
func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, error) {
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	rd := backend.NewReadSeeker(r.Backend(), h)

	unpacker, err := pack.NewUnpacker(r.Key(), rd)
	if err != nil {
		return nil, err
	}

	return unpacker.Entries, nil
}
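
A minimal usage sketch for ListPack from Example #6, printing the entries of one pack. Only the pack.Blob fields that appear in these examples (ID, Offset, Length) are used; the import paths are assumptions.

import (
	"fmt"

	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// printPack lists the blobs stored in the pack file identified by packID.
func printPack(repo *repository.Repository, packID backend.ID) error {
	blobs, err := repo.ListPack(packID)
	if err != nil {
		return err
	}

	for _, blob := range blobs {
		fmt.Printf("  blob %v at offset %d, length %d\n", blob.ID.Str(), blob.Offset, blob.Length)
	}
	return nil
}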
Example #7
// LoadIndex loads the index id from backend and returns it.
func LoadIndex(repo *Repository, id backend.ID) (*Index, error) {
	idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
	if err == nil {
		return idx, nil
	}

	if err == ErrOldIndexFormat {
		fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
		return LoadIndexWithDecoder(repo, id, DecodeOldIndex)
	}

	return nil, err
}
Example #8
func (c *Cache) filename(t backend.Type, subtype string, id backend.ID) (string, error) {
	filename := id.String()
	if subtype != "" {
		filename += "." + subtype
	}

	switch t {
	case backend.Snapshot:
		return filepath.Join(c.base, "snapshots", filename), nil
	}

	return "", fmt.Errorf("cache not supported for type %v", t)
}
Example #9
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
		return backend.ID{}, err
	}

	if sn.Tree == nil {
		debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
		return backend.ID{}, fmt.Errorf("snapshot %v has no tree", id)
	}

	return *sn.Tree, nil
}
Example #10
// SetID sets the ID under which the index has been written. This requires
// that Finalize() has been called before; otherwise an error is returned.
func (idx *Index) SetID(id backend.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
		return errors.New("indexs is not final")
	}

	if !idx.id.IsNull() {
		return errors.New("ID already set")
	}

	debug.Log("Index.SetID", "ID set to %v", id.Str())
	idx.id = id

	return nil
}
Example #11
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := backend.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
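
A minimal sketch that applies checkPack from Example #11 to every data pack, using the Repository.List channel API seen in Example #4. It assumes checkPack is visible from the calling package; the import paths are assumptions.

import (
	"fmt"

	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// checkAllPacks verifies every pack of type backend.Data and collects errors.
func checkAllPacks(repo *repository.Repository) error {
	var errs []error
	for id := range repo.List(backend.Data, nil) {
		if err := checkPack(repo, id); err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("%d packs failed verification: %v", len(errs), errs)
	}
	return nil
}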
Example #12
// SaveFrom encrypts data read from rd and stores it in a pack in the backend as type t.
func (r *Repository) SaveFrom(t pack.BlobType, id *backend.ID, length uint, rd io.Reader) error {
	debug.Log("Repo.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
	if id == nil {
		return errors.New("id is nil")
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}

	_, err = r.SaveAndEncrypt(t, buf, id)
	if err != nil {
		return err
	}

	return nil
}
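
A minimal sketch of calling SaveFrom from Example #12. Since the caller supplies the blob ID, the sketch hashes the plaintext with backend.Hash (as in Examples #11 and #15) before handing the data over as a reader. pack.Data is assumed to be the data-blob counterpart of pack.Tree used elsewhere in these examples; the import paths are assumptions.

import (
	"bytes"

	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/pack"       // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// saveBuffer stores data as a data blob, using its plaintext hash as the ID.
func saveBuffer(repo *repository.Repository, data []byte) (backend.ID, error) {
	id := backend.Hash(data)
	err := repo.SaveFrom(pack.Data, &id, uint(len(data)), bytes.NewReader(data))
	return id, err
}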
Example #13
func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error {
	tree, err := LoadTree(res.repo, treeID)
	if err != nil {
		return res.Error(dir, nil, errors.Annotate(err, "LoadTree"))
	}

	for _, node := range tree.Nodes {
		selectedForRestore := res.SelectFilter(filepath.Join(dir, node.Name),
			filepath.Join(dst, dir, node.Name), node)
		debug.Log("Restorer.restoreNodeTo", "SelectForRestore returned %v", selectedForRestore)

		if selectedForRestore {
			err := res.restoreNodeTo(node, dir, dst)
			if err != nil {
				return err
			}
		}

		if node.Type == "dir" {
			if node.Subtree == nil {
				return fmt.Errorf("Dir without subtree in tree %v", treeID.Str())
			}

			subp := filepath.Join(dir, node.Name)
			err = res.restoreTo(dst, subp, *node.Subtree)
			if err != nil {
				err = res.Error(subp, node, errors.Annotate(err, "restore subtree"))
				if err != nil {
					return err
				}
			}

			if selectedForRestore {
				// Restore the directory timestamp at the end. If we did it earlier, restoring files
				// within the directory would overwrite the timestamp of the directory they are in.
				if err := node.RestoreTimestamps(filepath.Join(dst, dir, node.Name)); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
Example #14
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
	debug.Log("Checker.checkTree", "checking tree %v", id.Str())

	var blobs []backend.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}
		}
	}

	for _, blobID := range blobs {
		c.blobRefs.Lock()
		c.blobRefs.M[blobID]++
		debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
		c.blobRefs.Unlock()

		if _, ok := c.blobs[blobID]; !ok {
			debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: &id, BlobID: &blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}
Example #15
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := backend.Hash(data)
		id = &hashedID
	}

	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return backend.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		return backend.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, ciphertext)
	if err != nil {
		return backend.ID{}, err
	}

	// if the pack is not full enough and there are fewer than maxPackers
	// packers, put it back on the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}
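
A minimal sketch of the two ways SaveAndEncrypt from Example #15 can be called: with a nil ID, so the repository hashes the plaintext itself, or with a precomputed ID as in Example #2. pack.Data is again an assumption, as are the import paths.

import (
	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/pack"       // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// saveAuto lets the repository compute the ID from the plaintext.
func saveAuto(repo *repository.Repository, data []byte) (backend.ID, error) {
	return repo.SaveAndEncrypt(pack.Data, data, nil)
}

// saveWithID stores data under an ID the caller has already computed.
func saveWithID(repo *repository.Repository, data []byte, id backend.ID) error {
	_, err := repo.SaveAndEncrypt(pack.Data, data, &id)
	return err
}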
Example #16
// ConvertIndex loads the given index from the repo and converts it to the new
// format (if necessary). When the conversion is successful, the old index
// is removed. It returns either the old ID (if no conversion was needed) or
// the new ID.
func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
	debug.Log("ConvertIndex", "checking index %v", id.Str())

	idx, err := LoadIndexWithDecoder(repo, id, DecodeOldIndex)
	if err != nil {
		debug.Log("ConvertIndex", "LoadIndexWithDecoder(%v) returned error: %v", id.Str(), err)
		return id, err
	}

	buf := bytes.NewBuffer(nil)
	idx.supersedes = backend.IDs{id}

	err = idx.Encode(buf)
	if err != nil {
		debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err)
		return id, err
	}

	return repo.SaveUnpacked(backend.Index, buf.Bytes())
}
Example #17
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack in the backend. The result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	blob, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	plaintextBufSize := uint(cap(plaintextBuf))
	if blob.PlaintextLength() > plaintextBufSize {
		debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d",
			blob.PlaintextLength(), plaintextBufSize)
		plaintextBuf = make([]byte, blob.PlaintextLength())
	}

	if blob.Type != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)

	// load blob from pack
	h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
	ciphertextBuf := make([]byte, blob.Length)
	n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
		return nil, err
	}

	if uint(n) != blob.Length {
		debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d",
			blob.Length, uint(n))
		return nil, errors.New("wrong length returned")
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
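
A minimal sketch showing how LoadBlob from Example #17 can reuse one plaintext buffer across several blobs: the caller looks up each blob in the index (as Example #2 does), passes the previous buffer back in with zero length, and LoadBlob reallocates only when the capacity is too small. The import paths are assumptions.

import (
	"fmt"

	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// loadBlobs loads several blobs, reusing a single plaintext buffer.
func loadBlobs(repo *repository.Repository, ids []backend.ID) error {
	var buf []byte
	for _, id := range ids {
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		// pass the buffer back in with length zero; LoadBlob grows it if
		// its capacity is too small for the plaintext
		buf, err = repo.LoadBlob(blob.Type, id, buf[:0])
		if err != nil {
			return err
		}
		fmt.Printf("blob %v: %d bytes\n", id.Str(), len(buf))
	}
	return nil
}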
Example #18
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if p, ok := idx.pack[id]; ok {
		debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
			id.Str(), p.packID.Str(), p.offset, p.length)

		pb := PackedBlob{
			Type:   p.tpe,
			Length: p.length,
			ID:     id,
			Offset: p.offset,
			PackID: p.packID,
		}
		return pb, nil
	}

	debug.Log("Index.Lookup", "id %v not found", id.Str())
	return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
}
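
Lookup in Example #18 signals a missing blob through its error return rather than a boolean, so a presence check is a small wrapper. A minimal sketch, assuming the old restic import paths and that Index is the repository package's exported index type (as suggested by LoadIndex in Example #7).

import (
	"github.com/restic/restic/backend"    // assumed import path
	"github.com/restic/restic/repository" // assumed import path
)

// hasBlob reports whether the index knows the given blob ID.
func hasBlob(idx *repository.Index, id backend.ID) bool {
	_, err := idx.Lookup(id)
	return err == nil
}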
Example #19
func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error {
	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", id.Str(), c.oldest, c.newest)

	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		return err
	}

	results, err := c.findInTree(repo, *sn.Tree, "")
	if err != nil {
		return err
	}

	if len(results) == 0 {
		return nil
	}
	c.global.Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
	for _, res := range results {
		res.node.Name = filepath.Join(res.path, res.node.Name)
		c.global.Printf("  %s\n", res.node)
	}

	return nil
}
Example #20
func TestID(t *testing.T) {
	for _, test := range TestStrings {
		id, err := backend.ParseID(test.id)
		OK(t, err)

		id2, err := backend.ParseID(test.id)
		OK(t, err)
		Assert(t, id.Equal(id2), "ID.Equal() does not work as expected")

		ret, err := id.EqualString(test.id)
		OK(t, err)
		Assert(t, ret, "ID.EqualString() returned wrong value")

		// test json marshalling
		buf, err := id.MarshalJSON()
		OK(t, err)
		Equals(t, "\""+test.id+"\"", string(buf))

		var id3 backend.ID
		err = id3.UnmarshalJSON(buf)
		OK(t, err)
		Equals(t, id, id3)
	}
}
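
The ID helpers exercised in Example #20 also appear throughout the other examples: ParseID parses a full hex ID, String and MarshalJSON give the long form, and Str() gives the abbreviated form used in the debug messages. A minimal sketch, with assumed import path:

import "github.com/restic/restic/backend" // assumed import path

// shortID parses a full hex ID string and returns its abbreviated form.
func shortID(s string) (string, error) {
	id, err := backend.ParseID(s)
	if err != nil {
		return "", err
	}
	return id.Str(), nil
}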
Example #21
// Save stores a blob read from rd in the repository.
func (arch *Archiver) Save(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())

	if arch.isKnownBlob(id) {
		debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
		return nil
	}

	err := arch.repo.SaveFrom(t, &id, length, rd)
	if err != nil {
		debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
		return err
	}

	debug.Log("Archiver.Save", "Save(%v, %v): new blob\n", t, id.Str())
	return nil
}
Example #22
// Save stores a blob of data in the repository.
func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())

	if arch.isKnownBlob(id) {
		debug.Log("Archiver.Save", "blob %v is known\n", id.Str())
		return nil
	}

	_, err := arch.repo.SaveAndEncrypt(t, data, &id)
	if err != nil {
		debug.Log("Archiver.Save", "Save(%v, %v): error %v\n", t, id.Str(), err)
		return err
	}

	debug.Log("Archiver.Save", "Save(%v, %v): new blob\n", t, id.Str())
	return nil
}
Example #23
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str())

	for _, idx := range mi.idx {
		blob, err = idx.Lookup(id)
		if err == nil {
			debug.Log("MasterIndex.Lookup",
				"found id %v: %v", id.Str(), blob)
			return
		}
	}

	debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
	return PackedBlob{}, fmt.Errorf("id %v not found in any index", id)
}
Example #24
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

	h := backend.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(r.be, h, nil)
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

	if t != backend.Config && !backend.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	// decrypt
	plain, err := r.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	return plain, nil
}
Example #25
func cmdRestoreIncludes(t testing.TB, global GlobalOptions, dir string, snapshotID backend.ID, includes []string) {
	cmd := &CmdRestore{global: &global, Target: dir, Include: includes}
	OK(t, cmd.Execute([]string{snapshotID.String()}))
}
Example #26
func (cmd CmdBackup) Execute(args []string) error {
	if len(args) == 0 {
		return fmt.Errorf("wrong number of parameters, Usage: %s", cmd.Usage())
	}

	target := make([]string, 0, len(args))
	for _, d := range args {
		if a, err := filepath.Abs(d); err == nil {
			d = a
		}
		target = append(target, d)
	}

	target, err := filterExisting(target)
	if err != nil {
		return err
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	var parentSnapshotID *backend.ID

	// use the parent snapshot given on the command line, unless --force was specified
	if !cmd.Force && cmd.Parent != "" {
		id, err := restic.FindSnapshot(repo, cmd.Parent)
		if err != nil {
			return fmt.Errorf("invalid id %q: %v", cmd.Parent, err)
		}

		parentSnapshotID = &id
	}

	// Find last snapshot to set it as parent, if not already set
	if !cmd.Force && parentSnapshotID == nil {
		id, err := findLatestSnapshot(repo, target)
		if err == nil {
			parentSnapshotID = &id
		} else if err != errNoSnapshotFound {
			return err
		}
	}

	if parentSnapshotID != nil {
		cmd.global.Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
	}

	cmd.global.Verbosef("scan %v\n", target)

	selectFilter := func(item string, fi os.FileInfo) bool {
		matched, err := filter.List(cmd.Excludes, item)
		if err != nil {
			cmd.global.Warnf("error for exclude pattern: %v", err)
		}

		if matched {
			debug.Log("backup.Execute", "path %q excluded by a filter", item)
		}

		return !matched
	}

	stat, err := restic.Scan(target, selectFilter, cmd.newScanProgress())
	if err != nil {
		return err
	}

	arch := restic.NewArchiver(repo)
	arch.Excludes = cmd.Excludes
	arch.SelectFilter = selectFilter

	arch.Error = func(dir string, fi os.FileInfo, err error) error {
		// TODO: make ignoring errors configurable
		cmd.global.Warnf("\x1b[2K\rerror for %s: %v\n", dir, err)
		return nil
	}

	_, id, err := arch.Snapshot(cmd.newArchiveProgress(stat), target, parentSnapshotID)
	if err != nil {
		return err
	}

	cmd.global.Verbosef("snapshot %s saved\n", id.Str())

	return nil
}
Example #27
func (cmd CmdCat) Execute(args []string) error {
	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
		return fmt.Errorf("type or ID not specified, Usage: %s", cmd.Usage())
	}

	repo, err := cmd.global.OpenRepository()
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tpe := args[0]

	var id backend.ID
	if tpe != "masterkey" && tpe != "config" {
		id, err = backend.ParseID(args[1])
		if err != nil {
			if tpe != "snapshot" {
				return err
			}

			// find snapshot id with prefix
			id, err = restic.FindSnapshot(repo, args[1])
			if err != nil {
				return err
			}
		}
	}

	// handle all types that don't need an index
	switch tpe {
	case "config":
		buf, err := json.MarshalIndent(repo.Config, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "index":
		buf, err := repo.LoadAndDecrypt(backend.Index, id)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	case "snapshot":
		sn := &restic.Snapshot{}
		err = repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&sn, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	case "key":
		h := backend.Handle{Type: backend.Key, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		key := &repository.Key{}
		err = json.Unmarshal(buf, key)
		if err != nil {
			return err
		}

		buf, err = json.MarshalIndent(&key, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "masterkey":
		buf, err := json.MarshalIndent(repo.Key(), "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "lock":
		lock, err := restic.LoadLock(repo, id)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&lock, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))

		return nil
	}

	// load index, handle all the other types
	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	switch tpe {
	case "pack":
		h := backend.Handle{Type: backend.Data, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(buf)
		return err

	case "blob":
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		buf := make([]byte, blob.Length)
		data, err := repo.LoadBlob(blob.Type, id, buf)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(data)
		return err

	case "tree":
		debug.Log("cat", "cat tree %v", id.Str())
		tree := restic.NewTree()
		err = repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			debug.Log("cat", "unable to load tree %v: %v", id.Str(), err)
			return err
		}

		buf, err := json.MarshalIndent(&tree, "", "  ")
		if err != nil {
			debug.Log("cat", "error json.MarshalIndent(): %v", err)
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err

	default:
		return errors.New("invalid type")
	}
}
Example #28
func removeLock(repo *repository.Repository, id backend.ID) error {
	return repo.Backend().Remove(backend.Lock, id.String())
}
Example #29
func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool {
	exists, err := repo.Backend().Test(backend.Lock, id.String())
	OK(t, err)

	return exists
}
Example #30
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
	debug.Log("Checker.checkTree", "checking tree %v", id.Str())

	var blobs []backend.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			if node.Content == nil {
				errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q has nil blob list", node.Name)})
			}

			if node.Mode == 0 {
				errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q has invalid mode: %v", node.Name, node.Mode)})
			}

			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}

		case "symlink":
			// nothing to check

		default:
			errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}

		if node.Name == "" {
			errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}

	for _, blobID := range blobs {
		c.blobRefs.Lock()
		c.blobRefs.M[blobID]++
		debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
		c.blobRefs.Unlock()

		if !c.blobs.Has(blobID) {
			debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}