Example #1
func (c CmdFind) findInSnapshot(repo *repository.Repository, id backend.ID) error {
	debug.Log("restic.find", "searching in snapshot %s\n  for entries within [%s %s]", id.Str(), c.oldest, c.newest)

	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		return err
	}

	results, err := c.findInTree(repo, *sn.Tree, "")
	if err != nil {
		return err
	}

	if len(results) == 0 {
		return nil
	}

	fmt.Printf("found %d matching entries in snapshot %s\n", len(results), id)
	for _, res := range results {
		res.node.Name = filepath.Join(res.path, res.node.Name)
		fmt.Printf("  %s\n", res.node)
	}

	return nil
}
Example #2
// AddInFlight adds the given ID to the list of in-flight IDs. An error is
// returned when the ID is already in the list. Setting ignoreDuplicates to
// true only checks the in-flight list; otherwise the index itself is also
// tested.
func (mi *MasterIndex) AddInFlight(id backend.ID, ignoreDuplicates bool) error {
	// The index + inFlight store must be searched for a matching id in one
	// atomic operation. This requires locking the inFlight store and the
	// index together!
	mi.inFlight.Lock()
	defer mi.inFlight.Unlock()

	if !ignoreDuplicates {
		// Note: mi.Has read locks the index again.
		mi.idxMutex.RLock()
		defer mi.idxMutex.RUnlock()
	}

	debug.Log("MasterIndex.AddInFlight", "adding %v", id.Str())
	if mi.inFlight.Has(id) {
		return fmt.Errorf("%v is already in flight", id.Str())
	}

	if !ignoreDuplicates {
		if mi.Has(id) {
			return fmt.Errorf("%v is already indexed (fully processed)", id)
		}
	}

	mi.inFlight.Insert(id)
	return nil
}
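A minimal caller sketch (hypothetical mi and id; RemoveFromInFlight is the cleanup counterpart used in Example #15): a duplicate error simply means another goroutine already handles this blob, so the caller skips the work.

	if err := mi.AddInFlight(id, false); err != nil {
		// already in flight or already indexed: nothing to do
		return nil
	}
	defer mi.RemoveFromInFlight(id)
	// ... encrypt and store the blob ...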
Example #3
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	buf := make([]byte, 0, blob.PlaintextLength())
	buf, err = src.LoadBlob(blob.Type, id, buf)
	if err != nil {
		return err
	}

	if uint(len(buf)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
	if err != nil {
		return err
	}

	return nil
}
Example #4
func walkTree(repo *repository.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())

	t, err := LoadTree(repo, treeID)
	if err != nil {
		select {
		case jobCh <- WalkTreeJob{Path: path, Error: err}:
		case <-done:
			return
		}
		return
	}

	for _, node := range t.Nodes {
		p := filepath.Join(path, node.Name)
		if node.Type == "dir" {
			walkTree(repo, p, *node.Subtree, done, jobCh)
		} else {
			select {
			case jobCh <- WalkTreeJob{Path: p, Node: node}:
			case <-done:
				return
			}
		}
	}

	select {
	case jobCh <- WalkTreeJob{Path: path, Tree: t}:
	case <-done:
		return
	}

	debug.Log("walkTree", "done for %q (%v)", path, treeID.Str())
}
Example #5
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

	rd, err := r.be.Get(t, id.String())
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	// decrypt
	plain, err := r.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	return plain, nil
}
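A brief usage sketch (repo and id are hypothetical; backend.Index is only an illustrative type that also appears in Example #30): the returned slice is the verified, decrypted plaintext.

	plain, err := repo.LoadAndDecrypt(backend.Index, id)
	if err != nil {
		return err
	}
	fmt.Printf("loaded %d plaintext bytes for %v\n", len(plain), id.Str())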
Example #6
// WalkTree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("WalkTree", "start on %v, start workers", id.Str())

	load := func(id backend.ID) (*Tree, error) {
		tree := &Tree{}
		err := repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("WalkTree", "done")
}
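A minimal consumer sketch (assuming repo satisfies the TreeLoader interface and treeID names a tree, e.g. *sn.Tree from Example #1): WalkTree closes jobCh when the walk is finished, so the caller can range over it; closing done would abort early.

	done := make(chan struct{})
	jobCh := make(chan WalkTreeJob)

	go WalkTree(repo, treeID, done, jobCh)

	for job := range jobCh {
		if job.Error != nil {
			fmt.Printf("error at %q: %v\n", job.Path, job.Error)
			continue
		}
		fmt.Println(job.Path)
	}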
Example #7
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
	debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
	defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)

	resCh := make(chan loadTreeResult, 1)
	tw.ch <- loadTreeJob{
		id:  id,
		res: resCh,
	}

	res := <-resCh
	if res.err != nil {
		select {
		case tw.out <- WalkTreeJob{Path: path, Error: res.err}:
		case <-done:
			return
		}
		return
	}

	tw.walk(path, res.tree, done)

	select {
	case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}:
	case <-done:
		return
	}
}
Example #8
func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error {
	tree, err := LoadTree(res.repo, treeID)
	if err != nil {
		return res.Error(dir, nil, errors.Annotate(err, "LoadTree"))
	}

	for _, node := range tree.Nodes {
		if err := res.restoreNodeTo(node, dir, dst); err != nil {
			return err
		}

		if node.Type == "dir" {
			if node.Subtree == nil {
				return fmt.Errorf("Dir without subtree in tree %v", treeID.Str())
			}

			subp := filepath.Join(dir, node.Name)
			err = res.restoreTo(dst, subp, *node.Subtree)
			if err != nil {
				err = res.Error(subp, node, errors.Annotate(err, "restore subtree"))
				if err != nil {
					return err
				}
			}

			// Restore directory timestamp at the end. If we would do it earlier, restoring files within
			// the directory would overwrite the timestamp of the directory they are in.
			if err := node.RestoreTimestamps(filepath.Join(dst, dir, node.Name)); err != nil {
				return err
			}
		}
	}

	return nil
}
Example #9
// IsInFlight returns true iff the id is contained in the list of in-flight IDs.
func (mi *MasterIndex) IsInFlight(id backend.ID) bool {
	mi.inFlight.RLock()
	defer mi.inFlight.RUnlock()

	inFlight := mi.inFlight.Has(id)
	debug.Log("MasterIndex.IsInFlight", "testing whether %v is in flight: %v", id.Str(), inFlight)

	return inFlight
}
Example #10
// Store remembers the id and pack in the index.
func (idx *Index) Store(t pack.BlobType, id, pack backend.ID, offset, length uint) {
	idx.m.Lock()
	defer idx.m.Unlock()

	debug.Log("Index.Store", "pack %v contains id %v (%v), offset %v, length %v",
		pack.Str(), id.Str(), t, offset, length)

	idx.store(t, id, pack, offset, length, false)
}
Example #11
// Remove removes the pack ID from the index.
func (idx *Index) Remove(packID backend.ID) {
	idx.m.Lock()
	defer idx.m.Unlock()

	debug.Log("Index.Remove", "id %v removed", packID.Str())

	s := packID.String()
	if _, ok := idx.pack[s]; ok {
		delete(idx.pack, s)
	}
}
Example #12
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend; the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	packID, tpe, offset, length, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	if length > uint(cap(plaintextBuf))+crypto.Extension {
		return nil, fmt.Errorf("buf is too small, need %d more bytes", length-uint(cap(plaintextBuf))-crypto.Extension)
	}

	if tpe != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)

	// load blob from pack
	rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
		return nil, err
	}

	// make buffer that is large enough for the complete blob
	ciphertextBuf := make([]byte, length)
	_, err = io.ReadFull(rd, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
Example #13
// Lookup returns the pack ID, blob type, offset and length for the blob with the given id.
func (idx *Index) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if p, ok := idx.pack[id.String()]; ok {
		debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
			id.Str(), p.packID.Str(), p.offset, p.length)
		return p.packID, p.tpe, p.offset, p.length, nil
	}

	debug.Log("Index.Lookup", "id %v not found", id.Str())
	return nil, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
}
Example #14
// Store remembers the id and pack in the index. An existing entry will be
// silently overwritten.
func (idx *Index) Store(t pack.BlobType, id backend.ID, pack backend.ID, offset, length uint) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if idx.final {
		panic("store new item in finalized index")
	}

	debug.Log("Index.Store", "pack %v contains id %v (%v), offset %v, length %v",
		pack.Str(), id.Str(), t, offset, length)

	idx.store(t, id, pack, offset, length)
}
Example #15
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data is small
// enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := backend.Hash(data)
		id = &hashedID
	}

	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return backend.ID{}, err
	}

	// add this id to the list of in-flight chunk ids.
	debug.Log("Repo.Save", "add %v to list of in-flight IDs", id.Str())
	err = r.idx.AddInFlight(*id)
	if err != nil {
		debug.Log("Repo.Save", "another goroutine is already working on %v (%v) does already exist", id.Str, t)
		return *id, nil
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		r.idx.RemoveFromInFlight(*id)
		return backend.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, bytes.NewReader(ciphertext))
	if err != nil {
		return backend.ID{}, err
	}

	// if the pack is not full enough and there are fewer than maxPackers
	// packers, put it back on the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}
Example #16
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		debug.Log("Checker.loadTreeFromSnapshot", "error loading snapshot %v: %v", id.Str(), err)
		return backend.ID{}, err
	}

	if sn.Tree == nil {
		debug.Log("Checker.loadTreeFromSnapshot", "snapshot %v has no tree", id.Str())
		return backend.ID{}, fmt.Errorf("snapshot %v has no tree", id)
	}

	return *sn.Tree, nil
}
Example #17
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	rd, err := r.Backend().Get(backend.Data, id.String())
	if err != nil {
		return err
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}

	err = rd.Close()
	if err != nil {
		return err
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
Example #18
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
	debug.Log("Checker.checkTree", "checking tree %v", id.Str())

	var blobs []backend.ID

	for i, node := range tree.Nodes {
		switch node.Type {
		case "file":
			blobs = append(blobs, node.Content...)
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: fmt.Errorf("node %d is dir but has no subtree", i)})
				continue
			}
		}
	}

	for _, blobID := range blobs {
		mid := id2map(blobID)
		c.blobRefs.Lock()
		c.blobRefs.M[mid]++
		debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[mid])
		c.blobRefs.Unlock()

		if _, ok := c.blobs[id2map(blobID)]; !ok {
			debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}
Example #19
// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
func (idx *Index) SetID(id backend.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
		return errors.New("indexs is not final")
	}

	if !idx.id.IsNull() {
		return errors.New("ID already set")
	}

	debug.Log("Index.SetID", "ID set to %v", id.Str())
	idx.id = id

	return nil
}
Example #20
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend.
func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	packID, tpe, offset, length, err := s.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	if tpe != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)

	// load blob from pack
	rd, err := s.be.GetReader(backend.Data, packID.String(), offset, length)
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
		return nil, err
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// decrypt
	plain, err := s.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plain).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plain, nil
}
Example #21
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id backend.ID) (blob PackedBlob, err error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	debug.Log("MasterIndex.Lookup", "looking up id %v", id.Str())

	for _, idx := range mi.idx {
		blob, err = idx.Lookup(id)
		if err == nil {
			debug.Log("MasterIndex.Lookup",
				"found id %v: %v", blob)
			return
		}
	}

	debug.Log("MasterIndex.Lookup", "id %v not found in any index", id.Str())
	return PackedBlob{}, fmt.Errorf("id %v not found in any index", id)
}
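A short usage sketch (mi and id hypothetical; the PackedBlob fields are those shown in Example #29): a successful lookup tells the caller which pack holds the blob and where.

	blob, err := mi.Lookup(id)
	if err != nil {
		return err
	}
	fmt.Printf("blob %v is stored in pack %v at offset %d\n", id.Str(), blob.PackID.Str(), blob.Offset)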
Example #22
// SaveFrom encrypts data read from rd and stores it in a pack in the backend as type t.
func (r *Repository) SaveFrom(t pack.BlobType, id *backend.ID, length uint, rd io.Reader) error {
	if id == nil {
		return errors.New("id is nil")
	}

	debug.Log("Repo.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}

	_, err = r.SaveAndEncrypt(t, buf, id)
	if err != nil {
		return err
	}

	return nil
}
Example #23
// IsInFlight returns true iff the id is contained in the list of in-flight IDs.
func (mi *MasterIndex) IsInFlight(id backend.ID) bool {
	// The index + inFlight store must be searched for a matching id in one
	// atomic operation. This requires locking the inFlight store and the
	// index together!
	mi.inFlight.RLock()
	defer mi.inFlight.RUnlock()

	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	inFlight := mi.inFlight.Has(id)
	debug.Log("MasterIndex.IsInFlight", "testing whether %v is in flight: %v", id.Str(), inFlight)

	indexed := mi.Has(id)
	debug.Log("MasterIndex.IsInFlight", "testing whether %v is indexed (fully processed): %v", id.Str(), indexed)

	return inFlight
}
Example #24
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data is small
// enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := backend.Hash(data)
		id = &hashedID
	}

	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return backend.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		return backend.ID{}, err
	}

	// save ciphertext
	packer.Add(t, *id, bytes.NewReader(ciphertext))

	// add this id to the index; we don't know yet in which pack it will be
	// saved, so the entry will be updated when the pack is written.
	r.idx.Store(t, *id, nil, 0, 0)
	debug.Log("Repo.Save", "saving stub for %v (%v) in index", id.Str(), t)

	// if the pack is not full enough and there are fewer than maxPackers
	// packers, put it back on the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}
Example #25
func (c *Checker) checkTree(id backend.ID, tree *restic.Tree) (errs []error) {
	debug.Log("Checker.checkTree", "checking tree %v", id.Str())

	var blobs []backend.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: &id, Err: fmt.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}
		}
	}

	for _, blobID := range blobs {
		c.blobRefs.Lock()
		c.blobRefs.M[blobID]++
		debug.Log("Checker.checkTree", "blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
		c.blobRefs.Unlock()

		if _, ok := c.blobs[blobID]; !ok {
			debug.Log("Checker.trees", "tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: &id, BlobID: &blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}
Example #26
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend; the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	blob, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	plaintextBufSize := uint(cap(plaintextBuf))
	if blob.PlaintextLength() > plaintextBufSize {
		return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize)
	}

	if blob.Type != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)

	// load blob from pack
	rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
		return nil, err
	}

	// make buffer that is large enough for the complete blob
	ciphertextBuf := make([]byte, blob.Length)
	_, err = io.ReadFull(rd, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
Example #27
// ConvertIndex loads the given index from the repo and converts it to the new
// format (if necessary). When the conversion is successful, the old index
// is removed. Returned is either the old id (if no conversion was needed) or
// the new id.
func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
	debug.Log("ConvertIndex", "checking index %v", id.Str())

	idx, err := LoadIndexWithDecoder(repo, id.String(), DecodeOldIndex)
	if err != nil {
		debug.Log("ConvertIndex", "LoadIndexWithDecoder(%v) returned error: %v", id.Str(), err)
		return id, err
	}

	buf := bytes.NewBuffer(nil)
	idx.supersedes = backend.IDs{id}

	err = idx.Encode(buf)
	if err != nil {
		debug.Log("ConvertIndex", "oldIdx.Encode() returned error: %v", err)
		return id, err
	}

	return repo.SaveUnpacked(backend.Index, buf.Bytes())
}
Example #28
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend; the result is stored in plaintextBuf, which is
// enlarged if it is too small to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	blob, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	plaintextBufSize := uint(cap(plaintextBuf))
	if blob.PlaintextLength() > plaintextBufSize {
		debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d",
			blob.PlaintextLength(), plaintextBufSize)
		plaintextBuf = make([]byte, blob.PlaintextLength())
	}

	if blob.Type != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)

	// load blob from pack
	h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
	ciphertextBuf := make([]byte, blob.Length)
	n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
		return nil, err
	}

	if uint(n) != blob.Length {
		debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d",
			blob.Length, uint(n))
		return nil, errors.New("wrong length returned")
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
Example #29
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id backend.ID) (pb PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if p, ok := idx.pack[id]; ok {
		debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
			id.Str(), p.packID.Str(), p.offset, p.length)

		pb := PackedBlob{
			Type:   p.tpe,
			Length: p.length,
			ID:     id,
			Offset: p.offset,
			PackID: p.packID,
		}
		return pb, nil
	}

	debug.Log("Index.Lookup", "id %v not found", id.Str())
	return PackedBlob{}, fmt.Errorf("id %v not found in index", id)
}
Example #30
// SaveIndex saves the index with all new packs in the backend and returns the
// storage ID.
func (r *Repository) SaveIndex() (backend.ID, error) {
	debug.Log("Repo.SaveIndex", "Saving index")

	// create blob
	blob, err := r.be.Create()
	if err != nil {
		return backend.ID{}, err
	}

	debug.Log("Repo.SaveIndex", "create new pack %p", blob)

	// hash
	hw := backend.NewHashingWriter(blob, sha256.New())

	// encrypt blob
	ewr := crypto.EncryptTo(r.key, hw)

	err = r.idx.Encode(ewr)
	if err != nil {
		return backend.ID{}, err
	}

	err = ewr.Close()
	if err != nil {
		return backend.ID{}, err
	}

	// finalize blob in the backend
	sid := backend.ID{}
	copy(sid[:], hw.Sum(nil))

	err = blob.Finalize(backend.Index, sid.String())
	if err != nil {
		return backend.ID{}, err
	}

	debug.Log("Repo.SaveIndex", "Saved index as %v", sid.Str())

	return sid, nil
}
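A minimal caller sketch (repo hypothetical): persist the in-memory index and keep the storage ID it was written under.

	sid, err := repo.SaveIndex()
	if err != nil {
		return err
	}
	fmt.Printf("index saved as %v\n", sid.Str())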