Code example #1
File: repository.go Project: ckemper67/restic
// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
	debug.Log("load tree %v", id.Str())

	size, err := r.idx.LookupSize(id, restic.TreeBlob)
	if err != nil {
		return nil, err
	}

	debug.Log("size is %d, create buffer", size)
	buf := make([]byte, size)

	n, err := r.loadBlob(id, restic.TreeBlob, buf)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	t := &restic.Tree{}
	err = json.Unmarshal(buf, t)
	if err != nil {
		return nil, err
	}

	return t, nil
}
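A usage sketch for LoadTree (hedged: printTree is a hypothetical helper, not part of restic; it assumes an open *Repository and a known tree ID):

// printTree is a hypothetical caller: it loads a tree and prints the name and
// type of every node it contains.
func printTree(repo *Repository, treeID restic.ID) error {
	tree, err := repo.LoadTree(treeID)
	if err != nil {
		return err
	}

	for _, node := range tree.Nodes {
		fmt.Printf("%s (%s)\n", node.Name, node.Type)
	}
	return nil
}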
Code example #2
File: lock.go Project: restic/restic
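// lockRepository creates a normal or exclusive lock on the repository and
// registers it with the goroutine that periodically refreshes all locks.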
func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock, error) {
	lockFn := restic.NewLock
	if exclusive {
		lockFn = restic.NewExclusiveLock
	}

	lock, err := lockFn(repo)
	if err != nil {
		return nil, err
	}
	debug.Log("create lock %p (exclusive %v)", lock, exclusive)

	globalLocks.Lock()
	if globalLocks.cancelRefresh == nil {
		debug.Log("start goroutine for lock refresh")
		globalLocks.cancelRefresh = make(chan struct{})
		globalLocks.refreshWG = sync.WaitGroup{}
		globalLocks.refreshWG.Add(1)
		go refreshLocks(&globalLocks.refreshWG, globalLocks.cancelRefresh)
	}

	globalLocks.locks = append(globalLocks.locks, lock)
	globalLocks.Unlock()

	return lock, nil
}
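A hedged sketch of how such a lock might be used (withExclusiveLock is hypothetical; it assumes restic.Lock provides an Unlock method, which the snippet above does not show):

// withExclusiveLock is a hypothetical helper: it takes an exclusive lock on
// the repository, runs fn, and releases the lock afterwards.
func withExclusiveLock(repo *repository.Repository, fn func() error) error {
	lock, err := lockRepository(repo, true)
	if err != nil {
		return err
	}
	defer lock.Unlock()

	return fn()
}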
Code example #3
File: lock.go Project: ckemper67/restic
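// refreshLocks refreshes all locks registered in globalLocks at every tick
// until the done channel is closed.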
func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
	debug.Log("start")
	defer func() {
		wg.Done()
		globalLocks.Lock()
		globalLocks.cancelRefresh = nil
		globalLocks.Unlock()
	}()

	ticker := time.NewTicker(refreshInterval)
	defer ticker.Stop()

	for {
		select {
		case <-done:
			debug.Log("terminate")
			return
		case <-ticker.C:
			debug.Log("refreshing locks")
			globalLocks.Lock()
			for _, lock := range globalLocks.locks {
				err := lock.Refresh()
				if err != nil {
					fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err)
				}
			}
			globalLocks.Unlock()
		}
	}
}
Code example #4
File: sftp.go Project: fawick/restic
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	filename, tmpfile, err := r.tempFile()
	if err != nil {
		return err
	}
	debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)

	n, err := tmpfile.Write(p)
	if err != nil {
		return err
	}

	if n != len(p) {
		return errors.New("not all bytes writen")
	}

	err = tmpfile.Close()
	if err != nil {
		return err
	}

	err = r.renameFile(filename, h.Type, h.Name)
	debug.Log("sftp.Save", "save %v: rename %v: %v",
		h, filepath.Base(filename), err)
	if err != nil {
		return fmt.Errorf("sftp: renameFile: %v", err)
	}

	return nil
}
Code example #5
File: walk.go Project: ckemper67/restic
// Tree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) {
	debug.Log("start on %v, start workers", id.Str())

	load := func(id restic.ID) (*restic.Tree, error) {
		tree, err := repo.LoadTree(id)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("done")
}
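A hedged consumer sketch for Tree (countTreeEntries is hypothetical and only counts the jobs; it assumes repo satisfies the TreeLoader interface used above):

// countTreeEntries starts Tree in a goroutine and drains the job channel,
// which Tree closes when the walk is finished.
func countTreeEntries(repo TreeLoader, id restic.ID) int {
	done := make(chan struct{})
	defer close(done)

	jobCh := make(chan TreeJob)
	go Tree(repo, id, done, jobCh)

	count := 0
	for range jobCh {
		count++
	}
	return count
}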
Code example #6
File: lock.go Project: ckemper67/restic
// Stale returns true if the lock is stale. A lock is stale if the timestamp is
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
func (l *Lock) Stale() bool {
	debug.Log("testing if lock %v for process %d is stale", l, l.PID)
	if time.Since(l.Time) > staleTimeout {
		debug.Log("lock is stale, timestamp is too old: %v\n", l.Time)
		return true
	}

	hn, err := os.Hostname()
	if err != nil {
		debug.Log("unable to find current hostnanme: %v", err)
		// since we cannot find the current hostname, assume that the lock is
		// not stale.
		return false
	}

	if hn != l.Hostname {
		// lock was created on a different host, assume the lock is not stale.
		return false
	}

	// check if we can reach the process retaining the lock
	exists := l.processExists()
	if !exists {
		debug.Log("could not reach process, %d, lock is probably stale\n", l.PID)
		return true
	}

	debug.Log("lock not stale\n")
	return false
}
Code example #7
File: repacker.go Project: fawick/restic
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	buf := make([]byte, 0, blob.PlaintextLength())
	buf, err = src.LoadBlob(blob.Type, id, buf)
	if err != nil {
		return err
	}

	if uint(len(buf)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
	if err != nil {
		return err
	}

	return nil
}
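A hedged caller sketch (repackBlobs is a hypothetical helper that simply applies repackBlob to a list of blob IDs):

// repackBlobs repacks every listed blob from src into dst, stopping at the
// first error.
func repackBlobs(src, dst *repository.Repository, ids []backend.ID) error {
	for _, id := range ids {
		if err := repackBlob(src, dst, id); err != nil {
			return err
		}
	}
	return nil
}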
Code example #8
File: index.go Project: ckemper67/restic
// Lookup queries the index for the blob ID and returns a restic.PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	h := restic.BlobHandle{ID: id, Type: tpe}

	if packs, ok := idx.pack[h]; ok {
		blobs = make([]restic.PackedBlob, 0, len(packs))

		for _, p := range packs {
			debug.Log("id %v found in pack %v at %d, length %d",
				id.Str(), p.packID.Str(), p.offset, p.length)

			blob := restic.PackedBlob{
				Blob: restic.Blob{
					Type:   tpe,
					Length: p.length,
					ID:     id,
					Offset: p.offset,
				},
				PackID: p.packID,
			}

			blobs = append(blobs, blob)
		}

		return blobs, nil
	}

	debug.Log("id %v not found", id.Str())
	return nil, errors.Errorf("id %v not found in index", id)
}
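A hedged caller sketch for Lookup (printBlobLocation is hypothetical; it uses only the PackedBlob fields shown above and assumes restic.DataBlob as the blob type):

// printBlobLocation looks up a data blob in the index and prints where each
// copy of it is stored.
func printBlobLocation(idx *Index, id restic.ID) error {
	blobs, err := idx.Lookup(id, restic.DataBlob)
	if err != nil {
		return err
	}

	for _, pb := range blobs {
		fmt.Printf("blob %v: pack %v, offset %d, length %d\n",
			pb.ID.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
	}
	return nil
}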
Code example #9
File: s3.go Project: ckemper67/restic
// Open opens the S3 backend at bucket and region. The bucket is created if it
// does not exist yet.
func Open(cfg Config) (restic.Backend, error) {
	debug.Log("open, config %#v", cfg)

	client, err := minio.New(cfg.Endpoint, cfg.KeyID, cfg.Secret, !cfg.UseHTTP)
	if err != nil {
		return nil, errors.Wrap(err, "minio.New")
	}

	be := &s3{client: client, bucketname: cfg.Bucket, prefix: cfg.Prefix}
	be.createConnections()

	ok, err := client.BucketExists(cfg.Bucket)
	if err != nil {
		debug.Log("BucketExists(%v) returned err %v, trying to create the bucket", cfg.Bucket, err)
		return nil, errors.Wrap(err, "client.BucketExists")
	}

	if !ok {
		// create new bucket with default ACL in default region
		err = client.MakeBucket(cfg.Bucket, "")
		if err != nil {
			return nil, errors.Wrap(err, "client.MakeBucket")
		}
	}

	return be, nil
}
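A hedged sketch of building a Config by hand and opening the backend (openFromEnv is hypothetical; the endpoint, bucket and prefix values are placeholders, and only fields that appear in the code above are set):

// openFromEnv fills a Config with placeholder values plus credentials from the
// environment and opens the S3 backend with it.
func openFromEnv() (restic.Backend, error) {
	cfg := Config{
		Endpoint: "s3.amazonaws.com",
		KeyID:    os.Getenv("AWS_ACCESS_KEY_ID"),
		Secret:   os.Getenv("AWS_SECRET_ACCESS_KEY"),
		Bucket:   "my-restic-bucket",
		Prefix:   "restic",
	}

	return Open(cfg)
}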
Code example #10
File: s3.go Project: ckemper67/restic
// Stat returns information about a blob.
func (be s3) Stat(h restic.Handle) (bi restic.FileInfo, err error) {
	debug.Log("%v", h)

	path := be.s3path(h.Type, h.Name)
	var obj *minio.Object

	obj, err = be.client.GetObject(be.bucketname, path)
	if err != nil {
		debug.Log("GetObject() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "client.GetObject")
	}

	// make sure that the object is closed properly.
	defer func() {
		e := obj.Close()
		if err == nil {
			err = errors.Wrap(e, "Close")
		}
	}()

	fi, err := obj.Stat()
	if err != nil {
		debug.Log("Stat() err %v", err)
		return restic.FileInfo{}, errors.Wrap(err, "Stat")
	}

	return restic.FileInfo{Size: fi.Size}, nil
}
Code example #11
File: s3.go Project: ckemper67/restic
// Save stores data in the backend at the handle.
func (be s3) Save(h restic.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("%v with %d bytes", h, len(p))

	path := be.s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("%v already exists", h)
		return errors.New("key already exists")
	}

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("%v -> %v bytes, err %#v", path, n, err)

	return errors.Wrap(err, "client.PutObject")
}
Code example #12
File: packer_manager.go Project: ckemper67/restic
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker(size uint) (packer *pack.Packer, err error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packs) > 0 {
		debug.Log("searching packer for %d bytes\n", size)
		for i, p := range r.packs {
			if p.Size()+size < maxPackSize {
				debug.Log("found packer %v", p)
				// remove from list
				r.packs = append(r.packs[:i], r.packs[i+1:]...)
				return p, nil
			}
		}
	}

	// no suitable packer found, return new
	debug.Log("create new pack for %d bytes", size)
	tmpfile, err := ioutil.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.Wrap(err, "ioutil.TempFile")
	}

	return pack.NewPacker(r.key, tmpfile), nil
}
Code example #13
File: index.go Project: fawick/restic
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
	debug.Log("Index.Dump", "dumping index")
	idx.m.Lock()
	defer idx.m.Unlock()

	list, err := idx.generatePackList()
	if err != nil {
		return err
	}

	outer := jsonIndex{
		Supersedes: idx.Supersedes(),
		Packs:      list,
	}

	buf, err := json.MarshalIndent(outer, "", "  ")
	if err != nil {
		return err
	}

	_, err = w.Write(append(buf, '\n'))
	if err != nil {
		return err
	}

	debug.Log("Index.Dump", "done")

	return nil
}
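A trivial caller sketch (dumpIndex is hypothetical): write the index to standard output.

// dumpIndex prints the pretty-printed JSON form of the index to stdout.
func dumpIndex(idx *Index) error {
	return idx.Dump(os.Stdout)
}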
Code example #14
File: packer_manager.go Project: fawick/restic
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	data, err := p.Finalize()
	if err != nil {
		return err
	}

	id := backend.Hash(data)
	h := backend.Handle{Type: backend.Data, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", h)

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Current().Store(PackedBlob{
			Type:   b.Type,
			ID:     b.ID,
			PackID: id,
			Offset: b.Offset,
			Length: uint(b.Length),
		})
	}

	return nil
}
Code example #15
File: archiver.go Project: MirkoDziadzka/restic
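// Copy returns the new job. For regular files that have not changed, the new
// job is annotated with the node from the old job so unchanged content can be reused.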
func (j archiveJob) Copy() pipe.Job {
	if !j.hasOld {
		return j.new
	}

	// handle files
	if isRegularFile(j.new.Info()) {
		debug.Log("archiveJob.Copy", "   job %v is file", j.new.Path())

		// if type has changed, return new job directly
		if j.old.Node == nil {
			return j.new
		}

		// if file is newer, return the new job
		if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
			debug.Log("archiveJob.Copy", "   job %v is newer", j.new.Path())
			return j.new
		}

		debug.Log("archiveJob.Copy", "   job %v add old data", j.new.Path())
		// otherwise annotate job with old data
		e := j.new.(pipe.Entry)
		e.Node = j.old.Node
		return e
	}

	// dirs and other types are just returned
	return j.new
}
Code example #16
File: global.go Project: restic/restic
// Create the backend specified by URI.
func create(s string) (restic.Backend, error) {
	debug.Log("parsing location %v", s)
	loc, err := location.Parse(s)
	if err != nil {
		return nil, err
	}

	switch loc.Scheme {
	case "local":
		debug.Log("create local repository at %#v", loc.Config)
		return local.Create(loc.Config.(string))
	case "sftp":
		debug.Log("create sftp repository at %#v", loc.Config)
		return sftp.CreateWithConfig(loc.Config.(sftp.Config))
	case "s3":
		cfg := loc.Config.(s3.Config)
		if cfg.KeyID == "" {
			cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")

		}
		if cfg.Secret == "" {
			cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
		}

		debug.Log("create s3 repository at %#v", loc.Config)
		return s3.Open(cfg)
	case "rest":
		return rest.Open(loc.Config.(rest.Config))
	}

	debug.Log("invalid repository scheme: %v", s)
	return nil, errors.Fatalf("invalid scheme %q", loc.Scheme)
}
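A hedged caller sketch (createLocalRepo is hypothetical; the path is a placeholder, and the exact location syntax is whatever location.Parse accepts):

// createLocalRepo creates a new repository backend from a "local:" location string.
func createLocalRepo() (restic.Backend, error) {
	return create("local:/srv/restic-repo")
}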
Code example #17
File: walk.go Project: fawick/restic
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
	debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
	defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)

	resCh := make(chan loadTreeResult, 1)
	tw.ch <- loadTreeJob{
		id:  id,
		res: resCh,
	}

	res := <-resCh
	if res.err != nil {
		select {
		case tw.out <- WalkTreeJob{Path: path, Error: res.err}:
		case <-done:
			return
		}
		return
	}

	tw.walk(path, res.tree, done)

	select {
	case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}:
	case <-done:
		return
	}
}
Code example #18
File: restorer.go Project: ckemper67/restic
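// restoreNodeTo creates the node below dst/dir, creating missing parent
// directories on demand and reporting failures through res.Error.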
func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
	debug.Log("node %v, dir %v, dst %v", node.Name, dir, dst)
	dstPath := filepath.Join(dst, dir, node.Name)

	err := node.CreateAt(dstPath, res.repo)
	if err != nil {
		debug.Log("node.CreateAt(%s) error %v", dstPath, err)
	}

	// Did it fail because of ENOENT?
	if err != nil && os.IsNotExist(errors.Cause(err)) {
		debug.Log("create intermediate paths")

		// Create parent directories and retry
		err = fs.MkdirAll(filepath.Dir(dstPath), 0700)
		if err == nil || os.IsExist(errors.Cause(err)) {
			err = node.CreateAt(dstPath, res.repo)
		}
	}

	if err != nil {
		debug.Log("error %v", err)
		err = res.Error(dstPath, node, err)
		if err != nil {
			return err
		}
	}

	debug.Log("successfully restored %v", node.Name)

	return nil
}
Code example #19
File: global.go Project: fawick/restic
// Open the backend specified by a location config.
func open(s string) (backend.Backend, error) {
	debug.Log("open", "parsing location %v", s)
	loc, err := location.Parse(s)
	if err != nil {
		return nil, err
	}

	switch loc.Scheme {
	case "local":
		debug.Log("open", "opening local repository at %#v", loc.Config)
		return local.Open(loc.Config.(string))
	case "sftp":
		debug.Log("open", "opening sftp repository at %#v", loc.Config)
		return sftp.OpenWithConfig(loc.Config.(sftp.Config))
	case "s3":
		cfg := loc.Config.(s3.Config)
		if cfg.KeyID == "" {
			cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")

		}
		if cfg.Secret == "" {
			cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
		}

		debug.Log("open", "opening s3 repository at %#v", cfg)
		return s3.Open(cfg)
	case "rest":
		return rest.Open(loc.Config.(rest.Config))
	}

	debug.Log("open", "invalid repository location: %v", s)
	return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
}
Code example #20
File: walk.go Project: fawick/restic
// WalkTree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("WalkTree", "start on %v, start workers", id.Str())

	load := func(id backend.ID) (*Tree, error) {
		tree := &Tree{}
		err := repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("WalkTree", "done")
}
Code example #21
File: checker.go Project: MirkoDziadzka/restic
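// packIDTester checks that each pack ID received on inChan exists in the
// backend and sends a PackError on errChan for every missing or failing pack.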
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
	debug.Log("Checker.testPackID", "worker start")
	defer debug.Log("Checker.testPackID", "worker done")

	defer wg.Done()

	for id := range inChan {
		ok, err := repo.Backend().Test(backend.Data, id.String())
		if err != nil {
			err = PackError{ID: id, Err: err}
		} else {
			if !ok {
				err = PackError{ID: id, Err: errors.New("does not exist")}
			}
		}

		if err != nil {
			debug.Log("Checker.testPackID", "error checking for pack %s: %v", id.Str(), err)
			select {
			case <-done:
				return
			case errChan <- err:
			}

			continue
		}

		debug.Log("Checker.testPackID", "pack %s exists", id.Str())
	}
}
Code example #22
File: key.go Project: ckemper67/restic
// SearchKey tries to decrypt at most maxKeys keys in the backend with the
// given password. If none could be found, ErrNoKeyFound is returned. When
// maxKeys is reached, ErrMaxKeysReached is returned. When setting maxKeys to
// zero, all keys in the repo are checked.
func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
	checked := 0

	// try at most maxKeys keys in the repo
	done := make(chan struct{})
	defer close(done)
	for name := range s.Backend().List(restic.KeyFile, done) {
		if maxKeys > 0 && checked > maxKeys {
			return nil, ErrMaxKeysReached
		}
		checked++

		debug.Log("trying key %v", name[:12])
		key, err := OpenKey(s, name, password)
		if err != nil {
			debug.Log("key %v returned error %v", name[:12], err)

			// ErrUnauthenticated means the password is wrong, try the next key
			if errors.Cause(err) == crypto.ErrUnauthenticated {
				continue
			}

			return nil, err
		}

		debug.Log("successfully opened key %v", name[:12])
		return key, nil
	}

	return nil, ErrNoKeyFound
}
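A hedged caller sketch (openAnyKey is hypothetical; the limit of 20 keys is arbitrary):

// openAnyKey searches the repository for a key that matches the password.
func openAnyKey(repo *Repository, password string) (*Key, error) {
	key, err := SearchKey(repo, password, 20)
	if err == ErrNoKeyFound {
		return nil, errors.New("no key could be opened with the given password")
	}
	return key, err
}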
Code example #23
File: index.go Project: fawick/restic
// DecodeIndex loads and unserializes an index from rd.
func DecodeIndex(rd io.Reader) (idx *Index, err error) {
	debug.Log("Index.DecodeIndex", "Start decoding index")
	idxJSON := jsonIndex{}

	dec := json.NewDecoder(rd)
	err = dec.Decode(&idxJSON)
	if err != nil {
		debug.Log("Index.DecodeIndex", "Error %v", err)

		if isErrOldIndex(err) {
			debug.Log("Index.DecodeIndex", "index is probably old format, trying that")
			err = ErrOldIndexFormat
		}

		return nil, err
	}

	idx = NewIndex()
	for _, pack := range idxJSON.Packs {
		for _, blob := range pack.Blobs {
			idx.store(PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				Offset: blob.Offset,
				Length: blob.Length,
				PackID: pack.ID,
			})
		}
	}
	idx.supersedes = idxJSON.Supersedes
	idx.final = true

	debug.Log("Index.DecodeIndex", "done")
	return idx, err
}
Code example #24
File: index.go Project: fawick/restic
// DecodeOldIndex loads and unserializes an index in the old format from rd.
func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
	debug.Log("Index.DecodeOldIndex", "Start decoding old index")
	list := []*packJSON{}

	dec := json.NewDecoder(rd)
	err = dec.Decode(&list)
	if err != nil {
		debug.Log("Index.DecodeOldIndex", "Error %#v", err)
		return nil, err
	}

	idx = NewIndex()
	for _, pack := range list {
		for _, blob := range pack.Blobs {
			idx.store(PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: pack.ID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}
	}
	idx.final = true

	debug.Log("Index.DecodeOldIndex", "done")
	return idx, err
}
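The two decoders are naturally combined: when DecodeIndex reports ErrOldIndexFormat, the same data can be re-read with DecodeOldIndex. A hedged sketch (decodeAnyIndex is hypothetical and assumes buf already holds the decrypted index JSON):

// decodeAnyIndex decodes an index, falling back to the old on-disk format
// when DecodeIndex signals ErrOldIndexFormat.
func decodeAnyIndex(buf []byte) (*Index, error) {
	idx, err := DecodeIndex(bytes.NewReader(buf))
	if err == ErrOldIndexFormat {
		idx, err = DecodeOldIndex(bytes.NewReader(buf))
	}
	return idx, err
}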
Code example #25
File: snapshot.go Project: ckemper67/restic
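// updateCache lists all snapshots in the repository and adds the ones not yet
// processed to the cache, keyed by a unique timestamp string.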
func (sn *SnapshotsDir) updateCache(ctx context.Context) error {
	debug.Log("called")
	sn.Lock()
	defer sn.Unlock()

	for id := range sn.repo.List(restic.SnapshotFile, ctx.Done()) {
		if sn.processed.Has(id) {
			debug.Log("skipping snapshot %v, already in list", id.Str())
			continue
		}

		debug.Log("found snapshot id %v", id.Str())
		snapshot, err := restic.LoadSnapshot(sn.repo, id)
		if err != nil {
			return err
		}

		timestamp := snapshot.Time.Format(time.RFC3339)
		for i := 1; ; i++ {
			if _, ok := sn.knownSnapshots[timestamp]; !ok {
				break
			}

			timestamp = fmt.Sprintf("%s-%d", snapshot.Time.Format(time.RFC3339), i)
		}

		debug.Log("  add %v as dir %v", id.Str(), timestamp)
		sn.knownSnapshots[timestamp] = SnapshotWithId{snapshot, id}
		sn.processed.Insert(id)
	}
	return nil
}
Code example #26
File: file.go Project: MirkoDziadzka/restic
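// getBlobAt returns the i-th content blob of the file, loading it from the
// repository and caching it on first access.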
func (f *file) getBlobAt(i int) (blob []byte, err error) {
	debug.Log("file.getBlobAt", "getBlobAt(%v, %v)", f.node.Name, i)
	if f.blobs[i] != nil {
		return f.blobs[i], nil
	}

	buf := blobPool.Get().([]byte)
	buf = buf[:cap(buf)]

	if uint(len(buf)) < f.sizes[i] {
		if len(buf) > defaultBlobSize {
			blobPool.Put(buf)
		}
		buf = make([]byte, f.sizes[i])
	}

	blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf)
	if err != nil {
		debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
		return nil, err
	}
	f.blobs[i] = blob

	return blob, nil
}
Code example #27
File: file.go Project: MirkoDziadzka/restic
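// newFile creates a file for the given node and looks up the size of each of
// its content blobs up front.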
func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error) {
	debug.Log("newFile", "create new file for %v with %d blobs", node.Name, len(node.Content))
	var bytes uint64
	sizes := make([]uint, len(node.Content))
	for i, id := range node.Content {
		size, err := repo.LookupBlobSize(id)
		if err != nil {
			return nil, err
		}

		sizes[i] = size
		bytes += uint64(size)
	}

	if bytes != node.Size {
		debug.Log("newFile", "sizes do not match: node.Size %v != size %v, using real size", node.Size, bytes)
		node.Size = bytes
	}

	return &file{
		repo:        repo,
		node:        node,
		sizes:       sizes,
		blobs:       make([][]byte, len(node.Content)),
		ownerIsRoot: ownerIsRoot,
	}, nil
}
Code example #28
File: dir.go Project: MirkoDziadzka/restic
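// newDirFromSnapshot creates a dir for the root tree of the given snapshot.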
func newDirFromSnapshot(repo *repository.Repository, snapshot SnapshotWithId, ownerIsRoot bool) (*dir, error) {
	debug.Log("newDirFromSnapshot", "new dir for snapshot %v (%v)", snapshot.ID.Str(), snapshot.Tree.Str())
	tree, err := restic.LoadTree(repo, *snapshot.Tree)
	if err != nil {
		debug.Log("newDirFromSnapshot", "  loadTree(%v) failed: %v", snapshot.ID.Str(), err)
		return nil, err
	}
	items := make(map[string]*restic.Node)
	for _, n := range tree.Nodes {
		nodes, err := replaceSpecialNodes(repo, n)
		if err != nil {
			debug.Log("newDirFromSnapshot", "  replaceSpecialNodes(%v) failed: %v", n, err)
			return nil, err
		}

		for _, node := range nodes {
			items[node.Name] = node
		}
	}

	return &dir{
		repo: repo,
		node: &restic.Node{
			UID:        uint32(os.Getuid()),
			GID:        uint32(os.Getgid()),
			AccessTime: snapshot.Time,
			ModTime:    snapshot.Time,
			ChangeTime: snapshot.Time,
			Mode:       os.ModeDir | 0555,
		},
		items:       items,
		inode:       inodeFromBackendId(snapshot.ID),
		ownerIsRoot: ownerIsRoot,
	}, nil
}
Code example #29
File: packer_manager.go Project: ckemper67/restic
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("save packer with %d blobs\n", p.Count())
	n, err := p.Finalize()
	if err != nil {
		return err
	}

	tmpfile := p.Writer().(*os.File)
	f, err := fs.Open(tmpfile.Name())
	if err != nil {
		return errors.Wrap(err, "Open")
	}

	data := make([]byte, n)
	m, err := io.ReadFull(f, data)
	if err != nil {
		return errors.Wrap(err, "ReadFul")
	}

	if uint(m) != n {
		return errors.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m)
	}

	if err = f.Close(); err != nil {
		return errors.Wrap(err, "Close")
	}

	id := restic.Hash(data)
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
		debug.Log("Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("saved as %v", h)

	err = fs.Remove(tmpfile.Name())
	if err != nil {
		return errors.Wrap(err, "Remove")
	}

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Current().Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   b.Type,
				ID:     b.ID,
				Offset: b.Offset,
				Length: uint(b.Length),
			},
			PackID: id,
		})
	}

	return nil
}
Code example #30
File: checker.go Project: ckemper67/restic
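// checkTree validates the nodes of the given tree and records the content
// blobs of regular files so their presence in the index can be verified.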
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
	debug.Log("checking tree %v", id.Str())

	var blobs []restic.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			if node.Content == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
			}

			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}

		case "symlink", "socket", "chardev", "dev", "fifo":
			// nothing to check

		default:
			errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}

		if node.Name == "" {
			errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}

	for _, blobID := range blobs {
		c.blobRefs.Lock()
		c.blobRefs.M[blobID]++
		debug.Log("blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
		c.blobRefs.Unlock()

		if !c.blobs.Has(blobID) {
			debug.Log("tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}