Example #1
0
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	// Look up where the blob lives and how large its plaintext is.
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	// Load the decrypted blob contents into a buffer sized to the
	// expected plaintext length.
	plaintext := make([]byte, 0, blob.PlaintextLength())
	plaintext, err = src.LoadBlob(blob.Type, id, plaintext)
	if err != nil {
		return err
	}

	// Sanity check: the loaded data must match the length recorded in
	// the index.
	if uint(len(plaintext)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(plaintext), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	// Re-encrypt and store the blob in the destination repository under
	// the same ID.
	_, err = dst.SaveAndEncrypt(blob.Type, plaintext, &id)
	return err
}
Example #2
0
// createFileAt creates (or truncates into) the file at path and writes the
// data blobs listed in node.Content, loaded from repo, into it in order.
func (node Node) createFileAt(path string, repo *repository.Repository) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return errors.Annotate(err, "OpenFile")
	}
	// Register Close only after a successful open; previously the defer was
	// placed before the error check, registering a Close on a failed open.
	// NOTE(review): the Close error is still ignored here, as in the
	// original — callers relying on durability may want it returned.
	defer f.Close()

	// buf is reused across iterations to avoid one allocation per blob.
	var buf []byte
	for _, id := range node.Content {
		blob, err := repo.Index().Lookup(id)
		if err != nil {
			return err
		}

		// Grow the buffer only when the next blob doesn't fit.
		buf = buf[:cap(buf)]
		if uint(len(buf)) < blob.Length {
			buf = make([]byte, blob.Length)
		}

		// Use plain assignment: the original `buf, err := ...` shadowed
		// the outer buf, so the slice returned by LoadBlob was never
		// stored back and the reuse optimization was defeated.
		buf, err = repo.LoadBlob(pack.Data, id, buf)
		if err != nil {
			return errors.Annotate(err, "Load")
		}

		if _, err = f.Write(buf); err != nil {
			return errors.Annotate(err, "Write")
		}
	}

	return nil
}
Example #3
0
// printTrees collects the IDs of all tree blobs known to the repository's
// index, then loads each tree and pretty-prints it as JSON to wr. Trees that
// fail to load are reported to stderr and skipped rather than aborting.
func printTrees(repo *repository.Repository, wr io.Writer) error {
	// First pass: gather all tree blob IDs from every index.
	// (The previous version created a `done` channel that was never
	// passed anywhere — dead code, removed.)
	trees := []backend.ID{}

	for _, idx := range repo.Index().All() {
		for blob := range idx.Each(nil) {
			if blob.Type != pack.Tree {
				continue
			}

			trees = append(trees, blob.ID)
		}
	}

	// Second pass: load and print each tree; a load failure is logged
	// and does not stop the remaining trees from printing.
	for _, id := range trees {
		tree, err := restic.LoadTree(repo, id)
		if err != nil {
			fmt.Fprintf(os.Stderr, "LoadTree(%v): %v", id.Str(), err)
			continue
		}

		fmt.Fprintf(wr, "tree_id: %v\n", id)

		prettyPrintJSON(wr, tree)
	}

	return nil
}
Example #4
0
// FindBlobsForPacks returns the set of blobs contained in a pack of packs.
func FindBlobsForPacks(repo *repository.Repository, packs backend.IDSet) (backend.IDSet, error) {
	result := backend.NewIDSet()

	// Collect every blob listed for each pack in the index.
	for packID := range packs {
		for _, entry := range repo.Index().ListPack(packID) {
			result.Insert(entry.ID)
		}
	}

	return result, nil
}
Example #5
0
// FindPacksForBlobs returns the set of packs that contain the blobs.
func FindPacksForBlobs(repo *repository.Repository, blobs backend.IDSet) (backend.IDSet, error) {
	idx := repo.Index()
	result := backend.NewIDSet()

	// Resolve each blob to its containing pack; an unknown blob aborts
	// the whole lookup.
	for blobID := range blobs {
		entry, err := idx.Lookup(blobID)
		if err != nil {
			return nil, err
		}
		result.Insert(entry.PackID)
	}

	return result, nil
}
Example #6
0
// saveTreeJSON stores a tree in the repository.
func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
	data, err := json.Marshal(item)
	if err != nil {
		return backend.ID{}, err
	}
	data = append(data, '\n')

	// Skip the save entirely if the index already knows a blob with this
	// content hash — the tree has been stored before.
	if id := backend.Hash(data); repo.Index().Has(id) {
		return id, nil
	}

	return repo.SaveJSON(pack.Tree, item)
}
Example #7
0
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
	debug.Log("ArchiveReader", "start archiving %s", name)
	sn, err := NewSnapshot([]string{name})
	if err != nil {
		return nil, backend.ID{}, err
	}

	p.Start()
	defer p.Done()

	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)

	var ids backend.IDs
	var fileSize uint64

	// Chunk the stream; each chunk becomes one data blob. Blobs already
	// present in the index are not stored again (deduplication).
	for {
		chunk, err := chnker.Next(getBuf())
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, backend.ID{}, err
		}

		id := backend.Hash(chunk.Data)

		if !repo.Index().Has(id) {
			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
			if err != nil {
				return nil, backend.ID{}, err
			}
			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
		} else {
			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
		}

		// Return the chunk buffer to the pool; chunk.Length is a plain
		// value and remains valid below.
		freeBuf(chunk.Data)

		ids = append(ids, id)

		p.Report(Stat{Bytes: uint64(chunk.Length)})
		fileSize += uint64(chunk.Length)
	}

	// Build a single-file tree referencing all chunk IDs in order.
	// (gofmt -s: the inner &Node{...} is redundant inside a []*Node
	// literal, so the address-of is dropped.)
	tree := &Tree{
		Nodes: []*Node{
			{
				Name:       name,
				AccessTime: time.Now(),
				ModTime:    time.Now(),
				Type:       "file",
				Mode:       0644,
				Size:       fileSize,
				UID:        sn.UID,
				GID:        sn.GID,
				User:       sn.Username,
				Content:    ids,
			},
		},
	}

	treeID, err := saveTreeJSON(repo, tree)
	if err != nil {
		return nil, backend.ID{}, err
	}
	sn.Tree = &treeID
	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.id = &id
	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())

	// Flush any partially filled packs, then persist the updated index so
	// the new blobs are findable.
	err = repo.Flush()
	if err != nil {
		return nil, backend.ID{}, err
	}

	err = repo.SaveIndex()
	if err != nil {
		return nil, backend.ID{}, err
	}

	return sn, id, nil
}