func TestLockRefresh(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	lock, err := restic.NewLock(repo)
	OK(t, err)

	var lockID *restic.ID
	for id := range repo.List(restic.LockFile, nil) {
		if lockID != nil {
			t.Error("more than one lock found")
		}
		id := id // copy, so the pointer does not alias the loop variable
		lockID = &id
	}

	OK(t, lock.Refresh())

	var lockID2 *restic.ID
	for id := range repo.List(restic.LockFile, nil) {
		if lockID2 != nil {
			t.Error("more than one lock found")
		}
		id := id
		lockID2 = &id
	}

	Assert(t, !lockID.Equal(*lockID2),
		"expected a new ID after lock refresh, got the same")
	OK(t, lock.Unlock())
}
// SaveBlob saves a blob of type t into the repository. If id is the null id, it
// will be computed and returned.
func (r *Repository) SaveBlob(t restic.BlobType, buf []byte, id restic.ID) (restic.ID, error) {
	var i *restic.ID
	if !id.IsNull() {
		i = &id
	}
	return r.SaveAndEncrypt(t, buf, i)
}
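// saveBlobExample is an illustrative sketch added for clarity, not part of the
// original source: it shows the two ways to call SaveBlob. Passing the null ID
// lets the repository hash the plaintext itself; passing a precomputed ID
// skips that step.
func saveBlobExample(r *Repository, data []byte) (restic.ID, error) {
	// let SaveBlob compute the ID from the plaintext
	id, err := r.SaveBlob(restic.DataBlob, data, restic.ID{})
	if err != nil {
		return restic.ID{}, err
	}
	_ = id

	// or pass an ID computed up front
	known := restic.Hash(data)
	return r.SaveBlob(restic.DataBlob, data, known)
}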
// Tree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func Tree(repo TreeLoader, id restic.ID, done chan struct{}, jobCh chan<- TreeJob) {
	debug.Log("start on %v, start workers", id.Str())

	load := func(id restic.ID) (*restic.Tree, error) {
		tree, err := repo.LoadTree(id)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("done")
}
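// walkTreeExample is an illustrative sketch added for clarity, not part of the
// original source: it drives Tree from a goroutine and drains the job channel
// (which Tree closes when it is finished); closing done stops the workers
// early if the caller returns before the walk completes.
func walkTreeExample(repo TreeLoader, root restic.ID) {
	done := make(chan struct{})
	defer close(done)

	jobCh := make(chan TreeJob)
	go Tree(repo, root, done, jobCh)

	for job := range jobCh {
		if job.Error != nil {
			fmt.Printf("error at %v: %v\n", job.Path, job.Error)
			continue
		}
		fmt.Printf("visited %v\n", job.Path)
	}
}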
// Lookup queries the index for the blob ID and returns a restic.PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	h := restic.BlobHandle{ID: id, Type: tpe}

	if packs, ok := idx.pack[h]; ok {
		blobs = make([]restic.PackedBlob, 0, len(packs))

		for _, p := range packs {
			debug.Log("id %v found in pack %v at %d, length %d",
				id.Str(), p.packID.Str(), p.offset, p.length)

			blob := restic.PackedBlob{
				Blob: restic.Blob{
					Type:   tpe,
					Length: p.length,
					ID:     id,
					Offset: p.offset,
				},
				PackID: p.packID,
			}

			blobs = append(blobs, blob)
		}

		return blobs, nil
	}

	debug.Log("id %v not found", id.Str())
	return nil, errors.Errorf("id %v not found in index", id)
}
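// lookupExample is an illustrative sketch added for clarity, not part of the
// original source: it resolves a blob ID to the pack file(s) that contain it,
// using the fields of restic.PackedBlob filled in by Lookup above.
func lookupExample(idx *Index, id restic.ID) {
	blobs, err := idx.Lookup(id, restic.DataBlob)
	if err != nil {
		fmt.Printf("blob %v is not in this index: %v\n", id.Str(), err)
		return
	}

	for _, pb := range blobs {
		fmt.Printf("blob %v: pack %v, offset %d, length %d\n",
			pb.ID.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
	}
}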
// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(id restic.ID) (*restic.Tree, error) {
	debug.Log("load tree %v", id.Str())

	size, err := r.idx.LookupSize(id, restic.TreeBlob)
	if err != nil {
		return nil, err
	}

	debug.Log("size is %d, create buffer", size)
	buf := make([]byte, size)

	n, err := r.loadBlob(id, restic.TreeBlob, buf)
	if err != nil {
		return nil, err
	}
	buf = buf[:n]

	t := &restic.Tree{}
	err = json.Unmarshal(buf, t)
	if err != nil {
		return nil, err
	}

	return t, nil
}
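// loadTreeExample is an illustrative sketch added for clarity, not part of the
// original source: it loads a tree and prints the names and types of its
// direct child nodes.
func loadTreeExample(r *Repository, id restic.ID) error {
	tree, err := r.LoadTree(id)
	if err != nil {
		return err
	}

	for _, node := range tree.Nodes {
		fmt.Printf("%s (%s)\n", node.Name, node.Type)
	}
	return nil
}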
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id restic.ID, done chan struct{}) {
	debug.Log("starting on tree %v for %v", id.Str(), path)
	defer debug.Log("done walking tree %v for %v", id.Str(), path)

	resCh := make(chan loadTreeResult, 1)
	tw.ch <- loadTreeJob{
		id:  id,
		res: resCh,
	}

	res := <-resCh
	if res.err != nil {
		select {
		case tw.out <- TreeJob{Path: path, Error: res.err}:
		case <-done:
			return
		}
		return
	}

	tw.walk(path, res.tree, done)

	select {
	case tw.out <- TreeJob{Path: path, Tree: res.tree}:
	case <-done:
		return
	}
}
func testRunRestoreIncludes(t testing.TB, gopts GlobalOptions, dir string, snapshotID restic.ID, includes []string) {
	opts := RestoreOptions{
		Target:  dir,
		Include: includes,
	}

	OK(t, runRestore(opts, gopts, []string{snapshotID.String()}))
}
func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) {
	debug.Log("checking tree %v", id.Str())

	var blobs []restic.ID

	for _, node := range tree.Nodes {
		switch node.Type {
		case "file":
			if node.Content == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q has nil blob list", node.Name)})
			}

			for b, blobID := range node.Content {
				if blobID.IsNull() {
					errs = append(errs, Error{TreeID: id, Err: errors.Errorf("file %q blob %d has null ID", node.Name, b)})
					continue
				}
				blobs = append(blobs, blobID)
			}
		case "dir":
			if node.Subtree == nil {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q has no subtree", node.Name)})
				continue
			}

			if node.Subtree.IsNull() {
				errs = append(errs, Error{TreeID: id, Err: errors.Errorf("dir node %q subtree id is null", node.Name)})
				continue
			}
		case "symlink", "socket", "chardev", "dev", "fifo":
			// nothing to check

		default:
			errs = append(errs, Error{TreeID: id, Err: errors.Errorf("node %q with invalid type %q", node.Name, node.Type)})
		}

		if node.Name == "" {
			errs = append(errs, Error{TreeID: id, Err: errors.New("node with empty name")})
		}
	}

	for _, blobID := range blobs {
		c.blobRefs.Lock()
		c.blobRefs.M[blobID]++
		debug.Log("blob %v refcount %d", blobID.Str(), c.blobRefs.M[blobID])
		c.blobRefs.Unlock()

		if !c.blobs.Has(blobID) {
			debug.Log("tree %v references blob %v which isn't contained in index", id.Str(), blobID.Str())

			errs = append(errs, Error{TreeID: id, BlobID: blobID, Err: errors.New("not found in index")})
		}
	}

	return errs
}
func loadIndexJSON(repo restic.Repository, id restic.ID) (*indexJSON, error) {
	debug.Log("process index %v\n", id.Str())

	var idx indexJSON
	err := repo.LoadJSONUnpacked(restic.IndexFile, id, &idx)
	if err != nil {
		return nil, err
	}

	return &idx, nil
}
// LoadIndex loads the index id from backend and returns it.
func LoadIndex(repo restic.Repository, id restic.ID) (*Index, error) {
	idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
	if err == nil {
		return idx, nil
	}

	if errors.Cause(err) == ErrOldIndexFormat {
		fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
		return LoadIndexWithDecoder(repo, id, DecodeOldIndex)
	}

	return nil, err
}
func loadTreeFromSnapshot(repo restic.Repository, id restic.ID) (restic.ID, error) {
	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		debug.Log("error loading snapshot %v: %v", id.Str(), err)
		return restic.ID{}, err
	}

	if sn.Tree == nil {
		debug.Log("snapshot %v has no tree", id.Str())
		return restic.ID{}, errors.Errorf("snapshot %v has no tree", id)
	}

	return *sn.Tree, nil
}
// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(id restic.ID) ([]restic.Blob, int64, error) {
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	blobInfo, err := r.Backend().Stat(h)
	if err != nil {
		return nil, 0, err
	}

	blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), blobInfo.Size)
	if err != nil {
		return nil, 0, err
	}

	return blobs, blobInfo.Size, nil
}
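// listPackExample is an illustrative sketch added for clarity, not part of the
// original source: it prints every blob stored in one pack file together with
// the pack's size as reported by the backend.
func listPackExample(r *Repository, packID restic.ID) error {
	blobs, size, err := r.ListPack(packID)
	if err != nil {
		return err
	}

	fmt.Printf("pack %v (%d bytes):\n", packID.Str(), size)
	for _, b := range blobs {
		fmt.Printf("  %v %v offset %d length %d\n", b.Type, b.ID.Str(), b.Offset, b.Length)
	}
	return nil
}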
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r restic.Repository, id restic.ID) error {
	debug.Log("checking pack %v", id.Str())
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := restic.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	blobs, err := pack.List(r.Key(), bytes.NewReader(buf), int64(len(buf)))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range blobs {
		debug.Log("  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, errors.Errorf("blob %v: %v", i, err))
			continue
		}
		plainBuf = plainBuf[:n]

		hash := restic.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
func (idx *Index) SetID(id restic.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
		return errors.New("index is not final")
	}

	if !idx.id.IsNull() {
		return errors.New("ID already set")
	}

	debug.Log("ID set to %v", id.Str())
	idx.id = id

	return nil
}
// RemovePack deletes a pack from the index.
func (idx *Index) RemovePack(id restic.ID) error {
	if _, ok := idx.Packs[id]; !ok {
		return errors.Errorf("pack %v not found in the index", id.Str())
	}

	for _, blob := range idx.Packs[id].Entries {
		h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
		idx.Blobs[h].Packs.Delete(id)

		if len(idx.Blobs[h].Packs) == 0 {
			delete(idx.Blobs, h)
		}
	}

	delete(idx.Packs, id)

	return nil
}
// LoadIndexWithDecoder loads the index and decodes it with fn.
func LoadIndexWithDecoder(repo restic.Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("Loading index %v", id.Str())

	buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
	if err != nil {
		return nil, err
	}

	idx, err = fn(bytes.NewReader(buf))
	if err != nil {
		debug.Log("error while decoding index %v: %v", id, err)
		return nil, err
	}

	idx.id = id

	return idx, nil
}
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := restic.Hash(data)
		id = &hashedID
	}

	debug.Log("save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return restic.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		return restic.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, ciphertext)
	if err != nil {
		return restic.ID{}, err
	}

	// if the pack is not full enough and there are fewer than maxPackers
	// packers, put it back on the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// otherwise write the pack to the backend
	return *id, r.savePacker(packer)
}
// LoadBlob loads a blob of type t from the repository to the buffer.
func (r *Repository) LoadBlob(t restic.BlobType, id restic.ID, buf []byte) (int, error) {
	debug.Log("load blob %v into buf %p", id.Str(), buf)
	size, err := r.idx.LookupSize(id, t)
	if err != nil {
		return 0, err
	}

	if len(buf) < int(size) {
		return 0, errors.Errorf("buffer is too small for data blob (%d < %d)", len(buf), size)
	}

	n, err := r.loadBlob(id, t, buf)
	if err != nil {
		return 0, err
	}
	buf = buf[:n]

	debug.Log("loaded %d bytes into buf %p", len(buf), buf)

	return len(buf), err
}
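// loadBlobExample is an illustrative sketch added for clarity, not part of the
// original source: LoadBlob requires a buffer at least as large as the blob's
// plaintext, so the caller allocates one first. Here the size is assumed to be
// known to the caller, e.g. from an earlier index lookup.
func loadBlobExample(r *Repository, id restic.ID, size uint) ([]byte, error) {
	buf := make([]byte, size)
	n, err := r.LoadBlob(restic.DataBlob, id, buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}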
// AddPack adds a pack to the index. If this pack is already in the index, an
// error is returned.
func (idx *Index) AddPack(id restic.ID, size int64, entries []restic.Blob) error {
	if _, ok := idx.Packs[id]; ok {
		return errors.Errorf("pack %v already present in the index", id.Str())
	}

	idx.Packs[id] = Pack{Size: size, Entries: entries}

	for _, entry := range entries {
		h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
		if _, ok := idx.Blobs[h]; !ok {
			idx.Blobs[h] = Blob{
				Size:  int64(entry.Length),
				Packs: restic.NewIDSet(),
			}
		}

		idx.Blobs[h].Packs.Insert(id)
	}

	return nil
}
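// indexPackExample is an illustrative sketch added for clarity, not part of
// the original source: it lists the blobs of a single pack file and registers
// them in the index, which is essentially one step of rebuilding an index from
// the packs in a repository. It assumes the restic.Repository interface
// exposes ListPack with the signature shown above.
func indexPackExample(repo restic.Repository, idx *Index, packID restic.ID) error {
	blobs, size, err := repo.ListPack(packID)
	if err != nil {
		return err
	}

	return idx.AddPack(packID, size, blobs)
}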
func findInSnapshot(repo *repository.Repository, pat findPattern, id restic.ID) error {
	debug.Log("searching in snapshot %s\n  for entries within [%s %s]", id.Str(), pat.oldest, pat.newest)

	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		return err
	}

	results, err := findInTree(repo, pat, *sn.Tree, "")
	if err != nil {
		return err
	}

	if len(results) == 0 {
		return nil
	}
	Verbosef("found %d matching entries in snapshot %s\n", len(results), id)
	for _, res := range results {
		res.node.Name = filepath.Join(res.path, res.node.Name)
		Printf("  %s\n", res.node)
	}

	return nil
}
// Save stores a blob with the given type and data in the repository, unless
// the blob is already known.
func (arch *Archiver) Save(t restic.BlobType, data []byte, id restic.ID) error {
	debug.Log("Save(%v, %v)\n", t, id.Str())

	if arch.isKnownBlob(id, restic.DataBlob) {
		debug.Log("blob %v is known\n", id.Str())
		return nil
	}

	_, err := arch.repo.SaveBlob(t, data, id)
	if err != nil {
		debug.Log("Save(%v, %v): error %v\n", t, id.Str(), err)
		return err
	}

	debug.Log("Save(%v, %v): new blob\n", t, id.Str())
	return nil
}
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []restic.PackedBlob, err error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

	debug.Log("looking up id %v, tpe %v", id.Str(), tpe)

	for _, idx := range mi.idx {
		blobs, err = idx.Lookup(id, tpe)
		if err == nil {
			debug.Log("found id %v: %v", id.Str(), blobs)
			return
		}
	}

	debug.Log("id %v not found in any index", id.Str())
	return nil, errors.Errorf("id %v not found in any index", id)
}
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
	debug.Log("load %v with id %v", t, id.Str())

	h := restic.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(r.be, h, nil)
	if err != nil {
		debug.Log("error loading %v: %v", id.Str(), err)
		return nil, err
	}

	if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	plain := make([]byte, len(buf))

	// decrypt
	n, err := r.decryptTo(plain, buf)
	if err != nil {
		return nil, err
	}

	return plain[:n], nil
}
func removeLock(repo restic.Repository, id restic.ID) error {
	return repo.Backend().Remove(restic.LockFile, id.String())
}
// loadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
	debug.Log("load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf))

	// lookup plaintext size of blob
	size, err := r.idx.LookupSize(id, t)
	if err != nil {
		return 0, err
	}

	// make sure the plaintext buffer is large enough
	if len(plaintextBuf) < int(size) {
		return 0, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size)
	}

	// lookup packs
	blobs, err := r.idx.Lookup(id, t)
	if err != nil {
		debug.Log("id %v not found in index: %v", id.Str(), err)
		return 0, err
	}

	var lastError error
	for _, blob := range blobs {
		debug.Log("id %v found: %v", id.Str(), blob)

		if blob.Type != t {
			debug.Log("blob %v has wrong blob type, want %v", blob, t)
		}

		// load blob from pack
		h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
		ciphertextBuf := make([]byte, blob.Length)
		n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
		if err != nil {
			debug.Log("error loading blob %v: %v", blob, err)
			lastError = err
			continue
		}

		if uint(n) != blob.Length {
			lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
				id.Str(), blob.Length, uint(n))
			debug.Log("lastError: %v", lastError)
			continue
		}

		// decrypt
		n, err = r.decryptTo(plaintextBuf, ciphertextBuf)
		if err != nil {
			lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
			continue
		}
		plaintextBuf = plaintextBuf[:n]

		// check hash
		if !restic.Hash(plaintextBuf).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

		return len(plaintextBuf), nil
	}

	if lastError != nil {
		return 0, lastError
	}

	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}
func runCat(gopts GlobalOptions, args []string) error {
	if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) {
		return errors.Fatalf("type or ID not specified")
	}

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	tpe := args[0]

	var id restic.ID
	if tpe != "masterkey" && tpe != "config" {
		id, err = restic.ParseID(args[1])
		if err != nil {
			if tpe != "snapshot" {
				return errors.Fatalf("unable to parse ID: %v\n", err)
			}

			// find snapshot id with prefix
			id, err = restic.FindSnapshot(repo, args[1])
			if err != nil {
				return err
			}
		}
	}

	// handle all types that don't need an index
	switch tpe {
	case "config":
		buf, err := json.MarshalIndent(repo.Config(), "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "index":
		buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
		if err != nil {
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err
	case "snapshot":
		sn := &restic.Snapshot{}
		err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&sn, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "key":
		h := restic.Handle{Type: restic.KeyFile, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		key := &repository.Key{}
		err = json.Unmarshal(buf, key)
		if err != nil {
			return err
		}

		buf, err = json.MarshalIndent(&key, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "masterkey":
		buf, err := json.MarshalIndent(repo.Key(), "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	case "lock":
		lock, err := restic.LoadLock(repo, id)
		if err != nil {
			return err
		}

		buf, err := json.MarshalIndent(&lock, "", "  ")
		if err != nil {
			return err
		}

		fmt.Println(string(buf))
		return nil
	}

	// load index, handle all the other types
	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	switch tpe {
	case "pack":
		h := restic.Handle{Type: restic.DataFile, Name: id.String()}
		buf, err := backend.LoadAll(repo.Backend(), h, nil)
		if err != nil {
			return err
		}

		hash := restic.Hash(buf)
		if !hash.Equal(id) {
			fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n  %v\ngot:\n  %v\n", id.String(), hash.String())
		}

		_, err = os.Stdout.Write(buf)
		return err
	case "blob":
		for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} {
			list, err := repo.Index().Lookup(id, t)
			if err != nil {
				continue
			}
			blob := list[0]

			buf := make([]byte, blob.Length)
			n, err := repo.LoadBlob(t, id, buf)
			if err != nil {
				return err
			}
			buf = buf[:n]

			_, err = os.Stdout.Write(buf)
			return err
		}

		return errors.Fatal("blob not found")
	case "tree":
		debug.Log("cat tree %v", id.Str())
		tree, err := repo.LoadTree(id)
		if err != nil {
			debug.Log("unable to load tree %v: %v", id.Str(), err)
			return err
		}

		buf, err := json.MarshalIndent(&tree, "", "  ")
		if err != nil {
			debug.Log("error json.MarshalIndent(): %v", err)
			return err
		}

		_, err = os.Stdout.Write(append(buf, '\n'))
		return err
	default:
		return errors.Fatal("invalid type")
	}
}
func runBackup(opts BackupOptions, gopts GlobalOptions, args []string) error {
	fromfile, err := readLinesFromFile(opts.FilesFrom)
	if err != nil {
		return err
	}

	// merge files from files-from into normal args so we can reuse the normal
	// args checks and have the ability to use both files-from and args at the
	// same time
	args = append(args, fromfile...)
	if len(args) == 0 {
		return errors.Fatalf("wrong number of parameters")
	}

	// rebuild the target list from the merged args so entries from files-from
	// are not duplicated, and make all paths absolute
	target := make([]string, 0, len(args))
	for _, d := range args {
		if a, err := filepath.Abs(d); err == nil {
			d = a
		}
		target = append(target, d)
	}

	target, err = filterExisting(target)
	if err != nil {
		return err
	}

	// allowed devices
	var allowedDevs map[uint64]struct{}
	if opts.ExcludeOtherFS {
		allowedDevs, err = gatherDevices(target)
		if err != nil {
			return err
		}
		debug.Log("allowed devices: %v\n", allowedDevs)
	}

	repo, err := OpenRepository(gopts)
	if err != nil {
		return err
	}

	lock, err := lockRepo(repo)
	defer unlockRepo(lock)
	if err != nil {
		return err
	}

	err = repo.LoadIndex()
	if err != nil {
		return err
	}

	var parentSnapshotID *restic.ID

	// Force using a parent
	if !opts.Force && opts.Parent != "" {
		id, err := restic.FindSnapshot(repo, opts.Parent)
		if err != nil {
			return errors.Fatalf("invalid id %q: %v", opts.Parent, err)
		}

		parentSnapshotID = &id
	}

	// Find last snapshot to set it as parent, if not already set
	if !opts.Force && parentSnapshotID == nil {
		id, err := restic.FindLatestSnapshot(repo, target, "")
		if err == nil {
			parentSnapshotID = &id
		} else if err != restic.ErrNoSnapshotFound {
			return err
		}
	}

	if parentSnapshotID != nil {
		Verbosef("using parent snapshot %v\n", parentSnapshotID.Str())
	}

	Verbosef("scan %v\n", target)

	// add patterns from file
	if opts.ExcludeFile != "" {
		file, err := fs.Open(opts.ExcludeFile)
		if err != nil {
			Warnf("error reading exclude patterns: %v", err)
			return nil
		}

		scanner := bufio.NewScanner(file)
		for scanner.Scan() {
			line := scanner.Text()
			if !strings.HasPrefix(line, "#") {
				line = os.ExpandEnv(line)
				opts.Excludes = append(opts.Excludes, line)
			}
		}
	}

	selectFilter := func(item string, fi os.FileInfo) bool {
		matched, err := filter.List(opts.Excludes, item)
		if err != nil {
			Warnf("error for exclude pattern: %v", err)
		}

		if matched {
			debug.Log("path %q excluded by a filter", item)
			return false
		}

		if !opts.ExcludeOtherFS || fi == nil {
			return true
		}

		id, err := fs.DeviceID(fi)
		if err != nil {
			// This should never happen because gatherDevices() would have
			// errored out earlier. If it still does that's a reason to panic.
			panic(err)
		}

		_, found := allowedDevs[id]
		if !found {
			debug.Log("path %q on disallowed device %d", item, id)
			return false
		}

		return true
	}

	stat, err := archiver.Scan(target, selectFilter, newScanProgress(gopts))
	if err != nil {
		return err
	}

	arch := archiver.New(repo)
	arch.Excludes = opts.Excludes
	arch.SelectFilter = selectFilter

	arch.Error = func(dir string, fi os.FileInfo, err error) error {
		// TODO: make ignoring errors configurable
		Warnf("%s\rerror for %s: %v\n", ClearLine(), dir, err)
		return nil
	}

	_, id, err := arch.Snapshot(newArchiveProgress(gopts, stat), target, opts.Tags, parentSnapshotID)
	if err != nil {
		return err
	}

	Verbosef("snapshot %s saved\n", id.Str())

	return nil
}
func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool {
	exists, err := repo.Backend().Test(restic.LockFile, id.String())
	OK(t, err)

	return exists
}