// TestCreateSnapshot creates a snapshot filled with fake data. The
// fake data is generated deterministically from the timestamp `at`, which is
// also used as the snapshot's timestamp.
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time) backend.ID {
	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir})
	if err != nil {
		t.Fatal(err)
	}
	snapshot.Time = at

	treeID := saveTree(t, repo, at.UnixNano())
	snapshot.Tree = &treeID

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	t.Logf("saved snapshot %v", id.Str())

	err = repo.Flush()
	if err != nil {
		t.Fatal(err)
	}

	err = repo.SaveIndex()
	if err != nil {
		t.Fatal(err)
	}

	return id
}
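// The sketch below shows how TestCreateSnapshot might be used from a test.
// TestRepository is a hypothetical setup helper (not defined in this file)
// that is assumed to return a throwaway repository and a cleanup function.
func TestSnapshotFixture(t *testing.T) {
	repo, cleanup := TestRepository(t) // hypothetical helper
	defer cleanup()

	// The timestamp both seeds the fake data and becomes the snapshot's
	// time, so repeated calls with the same `at` are deterministic.
	at := time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC)
	id := TestCreateSnapshot(t, repo, at)
	t.Logf("snapshot fixture created: %v", id.Str())
}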
// RepackBlobs reads all blobs in blobIDs from src and saves them into new pack
// files in dst. Source and destination repo may be the same.
func RepackBlobs(src, dst *repository.Repository, blobIDs backend.IDSet) (err error) {
	for id := range blobIDs {
		err = repackBlob(src, dst, id)
		if err != nil {
			return err
		}
	}

	err = dst.Flush()
	if err != nil {
		return err
	}

	return nil
}
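// The sketch below shows one way to call RepackBlobs within a single
// repository (src == dst is explicitly allowed). How the blob IDs are
// collected is up to the caller; the map literal is only for illustration
// and assumes backend.IDSet is a map keyed by ID with empty-struct values,
// as the range loop above suggests.
func repackTwo(repo *repository.Repository, id1, id2 backend.ID) error {
	blobIDs := backend.IDSet{
		id1: struct{}{},
		id2: struct{}{},
	}

	// RepackBlobs flushes dst before returning, so no extra Flush is
	// needed here.
	return RepackBlobs(repo, repo, blobIDs)
}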
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
	debug.Log("ArchiveReader", "start archiving %s", name)
	sn, err := NewSnapshot([]string{name})
	if err != nil {
		return nil, backend.ID{}, err
	}

	p.Start()
	defer p.Done()

	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)

	var ids backend.IDs
	var fileSize uint64

	// Split the stream into content-defined chunks and save each chunk as
	// a data blob, skipping blobs that are already present in the index.
	for {
		chunk, err := chnker.Next(getBuf())
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, backend.ID{}, err
		}

		id := backend.Hash(chunk.Data)

		if !repo.Index().Has(id) {
			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
			if err != nil {
				return nil, backend.ID{}, err
			}
			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
		} else {
			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
		}

		freeBuf(chunk.Data)

		ids = append(ids, id)

		p.Report(Stat{Bytes: uint64(chunk.Length)})
		fileSize += uint64(chunk.Length)
	}

	// Build a tree with a single file node whose content is the list of
	// chunk IDs, then save it and reference it from the snapshot.
	tree := &Tree{
		Nodes: []*Node{
			&Node{
				Name:       name,
				AccessTime: time.Now(),
				ModTime:    time.Now(),
				Type:       "file",
				Mode:       0644,
				Size:       fileSize,
				UID:        sn.UID,
				GID:        sn.GID,
				User:       sn.Username,
				Content:    ids,
			},
		},
	}

	treeID, err := saveTreeJSON(repo, tree)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.Tree = &treeID
	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.id = &id
	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())

	err = repo.Flush()
	if err != nil {
		return nil, backend.ID{}, err
	}

	err = repo.SaveIndex()
	if err != nil {
		return nil, backend.ID{}, err
	}

	return sn, id, nil
}
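// The sketch below shows a typical use of ArchiveReader: archiving a stream
// (stdin here) as a single file named "stdin". The caller is assumed to have
// opened the repository and created a Progress elsewhere; os.Stdin stands in
// for any io.Reader.
func archiveStdin(repo *repository.Repository, p *Progress) error {
	sn, id, err := ArchiveReader(repo, p, os.Stdin, "stdin")
	if err != nil {
		return err
	}

	fmt.Printf("archived snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())
	return nil
}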