func TestLoadLargeBuffer(t *testing.T) { b := mem.New() for i := 0; i < 20; i++ { data := Random(23+i, rand.Intn(MiB)+500*KiB) id := restic.Hash(data) err := b.Save(restic.Handle{Name: id.String(), Type: restic.DataFile}, data) OK(t, err) buf := make([]byte, len(data)+100) buf, err = backend.LoadAll(b, restic.Handle{Type: restic.DataFile, Name: id.String()}, buf) OK(t, err) if len(buf) != len(data) { t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf)) continue } if !bytes.Equal(buf, data) { t.Errorf("wrong data returned") continue } } }
func saveFile(t testing.TB, be Saver, filename string, n int) { f, err := os.Open(filename) if err != nil { t.Fatal(err) } data := make([]byte, n) m, err := io.ReadFull(f, data) if m != n { t.Fatalf("read wrong number of bytes from %v: want %v, got %v", filename, m, n) } if err = f.Close(); err != nil { t.Fatal(err) } h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()} err = be.Save(h, data) if err != nil { t.Fatal(err) } err = os.Remove(filename) if err != nil { t.Fatal(err) } }
func TestSaveFrom(t *testing.T) { repo, cleanup := repository.TestRepository(t) defer cleanup() for _, size := range testSizes { data := make([]byte, size) _, err := io.ReadFull(rand.Reader, data) OK(t, err) id := restic.Hash(data) // save id2, err := repo.SaveBlob(restic.DataBlob, data, id) OK(t, err) Equals(t, id, id2) OK(t, repo.Flush()) // read back buf := make([]byte, size) n, err := repo.LoadBlob(restic.DataBlob, id, buf) OK(t, err) Equals(t, len(buf), n) Assert(t, len(buf) == len(data), "number of bytes read back does not match: expected %d, got %d", len(data), len(buf)) Assert(t, bytes.Equal(buf, data), "data does not match: expected %02x, got %02x", data, buf) } }
// savePacker stores p in the backend. func (r *Repository) savePacker(p *pack.Packer) error { debug.Log("save packer with %d blobs\n", p.Count()) n, err := p.Finalize() if err != nil { return err } tmpfile := p.Writer().(*os.File) f, err := fs.Open(tmpfile.Name()) if err != nil { return errors.Wrap(err, "Open") } data := make([]byte, n) m, err := io.ReadFull(f, data) if err != nil { return errors.Wrap(err, "ReadFul") } if uint(m) != n { return errors.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m) } if err = f.Close(); err != nil { return errors.Wrap(err, "Close") } id := restic.Hash(data) h := restic.Handle{Type: restic.DataFile, Name: id.String()} err = r.be.Save(h, data) if err != nil { debug.Log("Save(%v) error: %v", h, err) return err } debug.Log("saved as %v", h) err = fs.Remove(tmpfile.Name()) if err != nil { return errors.Wrap(err, "Remove") } // update blobs in the index for _, b := range p.Blobs() { debug.Log(" updating blob %v to pack %v", b.ID.Str(), id.Str()) r.idx.Current().Store(restic.PackedBlob{ Blob: restic.Blob{ Type: b.Type, ID: b.ID, Offset: b.Offset, Length: uint(b.Length), }, PackID: id, }) } return nil }
// checkPack reads a pack and checks the integrity of all blobs. func checkPack(r restic.Repository, id restic.ID) error { debug.Log("checking pack %v", id.Str()) h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(r.Backend(), h, nil) if err != nil { return err } hash := restic.Hash(buf) if !hash.Equal(id) { debug.Log("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) return errors.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str()) } blobs, err := pack.List(r.Key(), bytes.NewReader(buf), int64(len(buf))) if err != nil { return err } var errs []error for i, blob := range blobs { debug.Log(" check blob %d: %v", i, blob.ID.Str()) plainBuf := make([]byte, blob.Length) n, err := crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length]) if err != nil { debug.Log(" error decrypting blob %v: %v", blob.ID.Str(), err) errs = append(errs, errors.Errorf("blob %v: %v", i, err)) continue } plainBuf = plainBuf[:n] hash := restic.Hash(plainBuf) if !hash.Equal(blob.ID) { debug.Log(" Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()) errs = append(errs, errors.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())) continue } } if len(errs) > 0 { return errors.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs) } return nil }
// TestLoadNegativeOffset tests the backend's Load function with negative offsets.
func TestLoadNegativeOffset(t testing.TB) {
	b := open(t)
	defer close(t)

	// store a random blob of at least 2000 bytes
	length := rand.Intn(1<<24) + 2000
	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	err := b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// test normal reads
	for i := 0; i < 50; i++ {
		// random buffer length and random negative offset (from the end)
		l := rand.Intn(length + 2000)
		o := -rand.Intn(length + 2000)

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		// NOTE(review): when o is 0 this branch still demands
		// io.ErrUnexpectedEOF for any non-empty buf — confirm Load's
		// behavior for offset 0 matches, otherwise this could be flaky.
		if len(buf) > -o {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), o)
				continue
			}
			// truncate the buffer to what could actually be read
			err = nil
			buf = buf[:-o]
		}

		if err != nil {
			t.Errorf("Load(%d, %d) returned error: %v", len(buf), o, err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d) returned short read, only got %d bytes", len(buf), o, n)
			continue
		}

		// a negative offset o addresses the last -o bytes of the file
		p := len(data) + o
		if !bytes.Equal(buf, data[p:p+len(buf)]) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), o)
			continue
		}
	}

	test.OK(t, b.Remove(restic.DataFile, id.String()))
}
func genTestContent() map[restic.ID][]byte { m := make(map[restic.ID][]byte) for _, length := range testContentLengths { buf := Random(int(length), int(length)) id := restic.Hash(buf) m[id] = buf testMaxFileSize += length } return m }
func TestShortPack(t *testing.T) { k := crypto.NewRandomKey() bufs, packData, packSize := newPack(t, k, []int{23}) b := mem.New() id := restic.Hash(packData) handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) }
func TestUnpackReadSeeker(t *testing.T) { // create random keys k := crypto.NewRandomKey() bufs, packData, packSize := newPack(t, k, testLens) b := mem.New() id := restic.Hash(packData) handle := restic.Handle{Type: restic.DataFile, Name: id.String()} OK(t, b.Save(handle, packData)) verifyBlobs(t, bufs, k, restic.ReaderAt(b, handle), packSize) }
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *restic.Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) { defer freeBuf(chunk.Data) id := restic.Hash(chunk.Data) err := arch.Save(restic.DataBlob, chunk.Data, id) // TODO handle error if err != nil { panic(err) } p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) arch.blobToken <- token resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)} }
// SaveTreeJSON stores a tree in the repository. func (arch *Archiver) SaveTreeJSON(tree *restic.Tree) (restic.ID, error) { data, err := json.Marshal(tree) if err != nil { return restic.ID{}, errors.Wrap(err, "Marshal") } data = append(data, '\n') // check if tree has been saved before id := restic.Hash(data) if arch.isKnownBlob(id, restic.TreeBlob) { return id, nil } return arch.repo.SaveBlob(restic.TreeBlob, data, id) }
// SaveTree stores a tree into the repository and returns the ID. The ID is // checked against the index. The tree is only stored when the index does not // contain the ID. func (r *Repository) SaveTree(t *restic.Tree) (restic.ID, error) { buf, err := json.Marshal(t) if err != nil { return restic.ID{}, errors.Wrap(err, "MarshalJSON") } // append a newline so that the data is always consistent (json.Encoder // adds a newline after each object) buf = append(buf, '\n') id := restic.Hash(buf) if r.idx.Has(id, restic.TreeBlob) { return id, nil } _, err = r.SaveBlob(restic.TreeBlob, buf, id) return id, err }
// SaveUnpacked encrypts data and stores it in the backend. Returned is the // storage hash. func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) { ciphertext := make([]byte, len(p)+crypto.Extension) ciphertext, err = r.Encrypt(ciphertext, p) if err != nil { return restic.ID{}, err } id = restic.Hash(ciphertext) h := restic.Handle{Type: t, Name: id.String()} err = r.be.Save(h, ciphertext) if err != nil { debug.Log("error saving blob %v: %v", h, err) return restic.ID{}, err } debug.Log("blob %v saved", h) return id, nil }
func testParallelSaveWithDuplication(t *testing.T, seed int) { repo, cleanup := repository.TestRepository(t) defer cleanup() dataSizeMb := 128 duplication := 7 arch := archiver.New(repo) chunks := getRandomData(seed, dataSizeMb*1024*1024) errChannels := [](<-chan error){} // interweaved processing of subsequent chunks maxParallel := 2*duplication - 1 barrier := make(chan struct{}, maxParallel) for _, c := range chunks { for dupIdx := 0; dupIdx < duplication; dupIdx++ { errChan := make(chan error) errChannels = append(errChannels, errChan) go func(c chunker.Chunk, errChan chan<- error) { barrier <- struct{}{} id := restic.Hash(c.Data) time.Sleep(time.Duration(id[0])) err := arch.Save(restic.DataBlob, c.Data, id) <-barrier errChan <- err }(c, errChan) } } for _, errChan := range errChannels { OK(t, <-errChan) } OK(t, repo.Flush()) OK(t, repo.SaveIndex()) chkr := createAndInitChecker(t, repo) assertNoUnreferencedPacks(t, chkr) }
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data // is small enough, it will be packed together with other small blobs. func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) { if id == nil { // compute plaintext hash hashedID := restic.Hash(data) id = &hashedID } debug.Log("save id %v (%v, %d bytes)", id.Str(), t, len(data)) // get buf from the pool ciphertext := getBuf() defer freeBuf(ciphertext) // encrypt blob ciphertext, err := r.Encrypt(ciphertext, data) if err != nil { return restic.ID{}, err } // find suitable packer and add blob packer, err := r.findPacker(uint(len(ciphertext))) if err != nil { return restic.ID{}, err } // save ciphertext _, err = packer.Add(t, *id, ciphertext) if err != nil { return restic.ID{}, err } // if the pack is not full enough and there are less than maxPackers // packers, put back to the list if packer.Size() < minPackSize && r.countPacker() < maxPackers { debug.Log("pack is not full enough (%d bytes)", packer.Size()) r.insertPacker(packer) return *id, nil } // else write the pack to the backend return *id, r.savePacker(packer) }
func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { for i := 0; i < blobs; i++ { var ( tpe restic.BlobType length int ) if rand.Float32() < pData { tpe = restic.DataBlob length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data } else { tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB } buf := random(t, length) id := restic.Hash(buf) if repo.Index().Has(id, restic.DataBlob) { t.Errorf("duplicate blob %v/%v ignored", id, restic.DataBlob) continue } _, err := repo.SaveBlob(tpe, buf, id) if err != nil { t.Fatalf("SaveFrom() error %v", err) } if rand.Float32() < 0.2 { if err = repo.Flush(); err != nil { t.Fatalf("repo.Flush() returned error %v", err) } } } if err := repo.Flush(); err != nil { t.Fatalf("repo.Flush() returned error %v", err) } }
// LoadAndDecrypt loads and decrypts data identified by t and id from the // backend. func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) { debug.Log("load %v with id %v", t, id.Str()) h := restic.Handle{Type: t, Name: id.String()} buf, err := backend.LoadAll(r.be, h, nil) if err != nil { debug.Log("error loading %v: %v", id.Str(), err) return nil, err } if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) { return nil, errors.New("invalid data returned") } plain := make([]byte, len(buf)) // decrypt n, err := r.decryptTo(plain, buf) if err != nil { return nil, err } return plain[:n], nil }
// TestLoad tests the backend's Load function.
func TestLoad(t testing.TB) {
	b := open(t)
	defer close(t)

	// a zero-value handle must be rejected
	_, err := b.Load(restic.Handle{}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	// loading a blob that was never saved must fail
	_, err = b.Load(restic.Handle{Type: restic.DataFile, Name: "foobar"}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	// store a random blob of at least 2000 bytes
	length := rand.Intn(1<<24) + 2000

	data := test.Random(23, length)
	id := restic.Hash(data)

	handle := restic.Handle{Type: restic.DataFile, Name: id.String()}
	err = b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	// reads with random length and random positive offset
	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		// d is the slice of data we expect Load to return
		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			// offset beyond the end: expect an empty result
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		if l > len(d) {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
			}
			// truncate the buffer to the readable part and keep checking
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d", len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	// test with negative offset
	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		// a negative offset o addresses the last o bytes of the file
		d := data
		if o < len(d) {
			d = d[len(d)-o:]
		} else {
			// offset larger than the file: read from the start
			o = 0
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, -int64(o))

		// if we requested data beyond the end of the file, require
		// ErrUnexpectedEOF error
		if l > len(d) {
			if errors.Cause(err) != io.ErrUnexpectedEOF {
				t.Errorf("Load(%d, %d) did not return io.ErrUnexpectedEOF", len(buf), int64(o))
				continue
			}
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d", len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	// load with a too-large buffer, this should return io.ErrUnexpectedEOF
	buf := make([]byte, length+100)
	n, err := b.Load(handle, buf, 0)
	if n != length {
		t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
	}

	if errors.Cause(err) != io.ErrUnexpectedEOF {
		t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
	}

	test.OK(t, b.Remove(restic.DataFile, id.String()))
}
// AddKey adds a new key to an already existing repository. func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) { // make sure we have valid KDF parameters if KDFParams == nil { p, err := crypto.Calibrate(KDFTimeout, KDFMemory) if err != nil { return nil, errors.Wrap(err, "Calibrate") } KDFParams = &p debug.Log("calibrated KDF parameters are %v", p) } // fill meta data about key newkey := &Key{ Created: time.Now(), KDF: "scrypt", N: KDFParams.N, R: KDFParams.R, P: KDFParams.P, } hn, err := os.Hostname() if err == nil { newkey.Hostname = hn } usr, err := user.Current() if err == nil { newkey.Username = usr.Username } // generate random salt newkey.Salt, err = crypto.NewSalt() if err != nil { panic("unable to read enough random bytes for salt: " + err.Error()) } // call KDF to derive user key newkey.user, err = crypto.KDF(*KDFParams, newkey.Salt, password) if err != nil { return nil, err } if template == nil { // generate new random master keys newkey.master = crypto.NewRandomKey() } else { // copy master keys from old key newkey.master = template } // encrypt master keys (as json) with user key buf, err := json.Marshal(newkey.master) if err != nil { return nil, errors.Wrap(err, "Marshal") } newkey.Data, err = crypto.Encrypt(newkey.user, nil, buf) // dump as json buf, err = json.Marshal(newkey) if err != nil { return nil, errors.Wrap(err, "Marshal") } // store in repository and return h := restic.Handle{ Type: restic.KeyFile, Name: restic.Hash(buf).String(), } err = s.be.Save(h, buf) if err != nil { return nil, err } newkey.name = h.Name return newkey, nil }
// loadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) loadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) (int, error) {
	debug.Log("load %v with id %v (buf %p, len %d)", t, id.Str(), plaintextBuf, len(plaintextBuf))

	// lookup plaintext size of blob
	size, err := r.idx.LookupSize(id, t)
	if err != nil {
		return 0, err
	}

	// make sure the plaintext buffer is large enough, extend otherwise
	if len(plaintextBuf) < int(size) {
		return 0, errors.Errorf("buffer is too small: %d < %d", len(plaintextBuf), size)
	}

	// lookup packs
	blobs, err := r.idx.Lookup(id, t)
	if err != nil {
		debug.Log("id %v not found in index: %v", id.Str(), err)
		return 0, err
	}

	// try each pack containing the blob; the first one that loads,
	// decrypts and passes the hash check wins, otherwise remember the
	// last error and move on
	var lastError error
	for _, blob := range blobs {
		debug.Log("id %v found: %v", id.Str(), blob)

		if blob.Type != t {
			debug.Log("blob %v has wrong block type, want %v", blob, t)
		}

		// load blob from pack
		h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
		ciphertextBuf := make([]byte, blob.Length)
		n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
		if err != nil {
			debug.Log("error loading blob %v: %v", blob, err)
			lastError = err
			continue
		}

		if uint(n) != blob.Length {
			lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d", id.Str(), blob.Length, uint(n))
			debug.Log("lastError: %v", lastError)
			continue
		}

		// decrypt
		n, err = r.decryptTo(plaintextBuf, ciphertextBuf)
		if err != nil {
			lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
			continue
		}
		plaintextBuf = plaintextBuf[:n]

		// check hash
		if !restic.Hash(plaintextBuf).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

		return len(plaintextBuf), nil
	}

	if lastError != nil {
		return 0, lastError
	}

	return 0, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}
func runCat(gopts GlobalOptions, args []string) error { if len(args) < 1 || (args[0] != "masterkey" && args[0] != "config" && len(args) != 2) { return errors.Fatalf("type or ID not specified") } repo, err := OpenRepository(gopts) if err != nil { return err } lock, err := lockRepo(repo) defer unlockRepo(lock) if err != nil { return err } tpe := args[0] var id restic.ID if tpe != "masterkey" && tpe != "config" { id, err = restic.ParseID(args[1]) if err != nil { if tpe != "snapshot" { return errors.Fatalf("unable to parse ID: %v\n", err) } // find snapshot id with prefix id, err = restic.FindSnapshot(repo, args[1]) if err != nil { return err } } } // handle all types that don't need an index switch tpe { case "config": buf, err := json.MarshalIndent(repo.Config(), "", " ") if err != nil { return err } fmt.Println(string(buf)) return nil case "index": buf, err := repo.LoadAndDecrypt(restic.IndexFile, id) if err != nil { return err } _, err = os.Stdout.Write(append(buf, '\n')) return err case "snapshot": sn := &restic.Snapshot{} err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, sn) if err != nil { return err } buf, err := json.MarshalIndent(&sn, "", " ") if err != nil { return err } fmt.Println(string(buf)) return nil case "key": h := restic.Handle{Type: restic.KeyFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err } key := &repository.Key{} err = json.Unmarshal(buf, key) if err != nil { return err } buf, err = json.MarshalIndent(&key, "", " ") if err != nil { return err } fmt.Println(string(buf)) return nil case "masterkey": buf, err := json.MarshalIndent(repo.Key(), "", " ") if err != nil { return err } fmt.Println(string(buf)) return nil case "lock": lock, err := restic.LoadLock(repo, id) if err != nil { return err } buf, err := json.MarshalIndent(&lock, "", " ") if err != nil { return err } fmt.Println(string(buf)) return nil } // load index, handle all the other types err = repo.LoadIndex() if err != nil { 
return err } switch tpe { case "pack": h := restic.Handle{Type: restic.DataFile, Name: id.String()} buf, err := backend.LoadAll(repo.Backend(), h, nil) if err != nil { return err } hash := restic.Hash(buf) if !hash.Equal(id) { fmt.Fprintf(stderr, "Warning: hash of data does not match ID, want\n %v\ngot:\n %v\n", id.String(), hash.String()) } _, err = os.Stdout.Write(buf) return err case "blob": for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { list, err := repo.Index().Lookup(id, t) if err != nil { continue } blob := list[0] buf := make([]byte, blob.Length) n, err := repo.LoadBlob(restic.DataBlob, id, buf) if err != nil { return err } buf = buf[:n] _, err = os.Stdout.Write(buf) return err } return errors.Fatal("blob not found") case "tree": debug.Log("cat tree %v", id.Str()) tree, err := repo.LoadTree(id) if err != nil { debug.Log("unable to load tree %v: %v", id.Str(), err) return err } buf, err := json.MarshalIndent(&tree, "", " ") if err != nil { debug.Log("error json.MarshalIndent(): %v", err) return err } _, err = os.Stdout.Write(append(buf, '\n')) return nil default: return errors.Fatal("invalid type") } }
func store(t testing.TB, b restic.Backend, tpe restic.FileType, data []byte) { id := restic.Hash(data) err := b.Save(restic.Handle{Name: id.String(), Type: tpe}, data) test.OK(t, err) }
// ArchiveReader reads from the reader and archives the data. Returned is the // resulting snapshot and its ID. func ArchiveReader(repo restic.Repository, p *restic.Progress, rd io.Reader, name string, tags []string) (*restic.Snapshot, restic.ID, error) { debug.Log("start archiving %s", name) sn, err := restic.NewSnapshot([]string{name}, tags) if err != nil { return nil, restic.ID{}, err } p.Start() defer p.Done() chnker := chunker.New(rd, repo.Config().ChunkerPolynomial) var ids restic.IDs var fileSize uint64 for { chunk, err := chnker.Next(getBuf()) if errors.Cause(err) == io.EOF { break } if err != nil { return nil, restic.ID{}, errors.Wrap(err, "chunker.Next()") } id := restic.Hash(chunk.Data) if !repo.Index().Has(id, restic.DataBlob) { _, err := repo.SaveBlob(restic.DataBlob, chunk.Data, id) if err != nil { return nil, restic.ID{}, err } debug.Log("saved blob %v (%d bytes)\n", id.Str(), chunk.Length) } else { debug.Log("blob %v already saved in the repo\n", id.Str()) } freeBuf(chunk.Data) ids = append(ids, id) p.Report(restic.Stat{Bytes: uint64(chunk.Length)}) fileSize += uint64(chunk.Length) } tree := &restic.Tree{ Nodes: []*restic.Node{ &restic.Node{ Name: name, AccessTime: time.Now(), ModTime: time.Now(), Type: "file", Mode: 0644, Size: fileSize, UID: sn.UID, GID: sn.GID, User: sn.Username, Content: ids, }, }, } treeID, err := repo.SaveTree(tree) if err != nil { return nil, restic.ID{}, err } sn.Tree = &treeID debug.Log("tree saved as %v", treeID.Str()) id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, sn) if err != nil { return nil, restic.ID{}, err } debug.Log("snapshot saved as %v", id.Str()) err = repo.Flush() if err != nil { return nil, restic.ID{}, err } err = repo.SaveIndex() if err != nil { return nil, restic.ID{}, err } return sn, id, nil }