// SaveJSONUnpacked serialises item as JSON, encrypts it and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
	// create file
	blob, err := s.be.Create()
	if err != nil {
		return nil, err
	}
	debug.Log("Repo.SaveJSONUnpacked", "create new file %p", blob)

	// hash
	hw := backend.NewHashingWriter(blob, sha256.New())

	// encrypt blob
	ewr := crypto.EncryptTo(s.key, hw)

	enc := json.NewEncoder(ewr)
	err = enc.Encode(item)
	if err != nil {
		return nil, fmt.Errorf("json.Encode: %v", err)
	}

	err = ewr.Close()
	if err != nil {
		return nil, err
	}

	// finalize blob in the backend
	sid := backend.ID(hw.Sum(nil))

	err = blob.Finalize(t, sid.String())
	if err != nil {
		return nil, err
	}

	return sid, nil
}
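// A minimal usage sketch for SaveJSONUnpacked, not part of the original
// source: it assumes a backend.Snapshot type constant and a Snapshot struct
// with a Time field exist in this package, and shows a caller storing a JSON
// document outside a pack and receiving the storage ID back.
func saveSnapshotExample(repo *Repository) (backend.ID, error) {
	sn := &Snapshot{Time: time.Now()}

	// the returned ID is the SHA-256 of the encrypted blob, as computed by
	// the HashingWriter inside SaveJSONUnpacked
	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, err
	}

	return id, nil
}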
// saveChunk stores a single chunk in the repository, reports its size to the
// progress, returns the concurrency token and sends the result to
// resultChannel.
func (arch *Archiver) saveChunk(chunk *chunker.Chunk, p *Progress, token struct{}, file *os.File, resultChannel chan<- saveResult) {
	err := arch.Save(pack.Data, chunk.Digest, chunk.Length, chunk.Reader(file))
	// TODO handle error
	if err != nil {
		panic(err)
	}

	p.Report(Stat{Bytes: uint64(chunk.Length)})
	arch.blobToken <- token
	resultChannel <- saveResult{id: backend.ID(chunk.Digest), bytes: uint64(chunk.Length)}
}
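// A sketch (with assumed inputs, not the original archiver code) of the
// concurrency pattern saveChunk is built for: a token is taken from
// arch.blobToken before each goroutine starts, saveChunk hands it back when
// the chunk has been stored, and the results channel is drained afterwards.
func (arch *Archiver) saveAllChunksExample(chunks []*chunker.Chunk, file *os.File, p *Progress) []saveResult {
	resCh := make(chan saveResult, len(chunks))

	for _, c := range chunks {
		// blocks until a save slot is free, bounding concurrent saves
		token := <-arch.blobToken
		go arch.saveChunk(c, p, token, file, resCh)
	}

	results := make([]saveResult, 0, len(chunks))
	for range chunks {
		results = append(results, <-resCh)
	}

	return results
}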
func BenchmarkSaveFrom(t *testing.B) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	size := 4 << 20 // 4MiB

	data := make([]byte, size)
	_, err := io.ReadFull(rand.Reader, data)
	OK(t, err)

	id := backend.ID(sha256.Sum256(data))

	t.ResetTimer()
	t.SetBytes(int64(size))

	for i := 0; i < t.N; i++ {
		// save
		err = repo.SaveFrom(pack.Data, &id, uint(size), bytes.NewReader(data))
		OK(t, err)
	}
}
// SaveIndex saves all new packs in the index to the backend and returns the
// storage ID.
func (s *Repository) SaveIndex() (backend.ID, error) {
	debug.Log("Repo.SaveIndex", "Saving index")

	// create blob
	blob, err := s.be.Create()
	if err != nil {
		return nil, err
	}

	debug.Log("Repo.SaveIndex", "create new pack %p", blob)

	// hash
	hw := backend.NewHashingWriter(blob, sha256.New())

	// encrypt blob
	ewr := crypto.EncryptTo(s.key, hw)

	err = s.idx.Encode(ewr)
	if err != nil {
		return nil, err
	}

	err = ewr.Close()
	if err != nil {
		return nil, err
	}

	// finalize blob in the backend
	sid := backend.ID(hw.Sum(nil))

	err = blob.Finalize(backend.Index, sid.String())
	if err != nil {
		return nil, err
	}

	debug.Log("Repo.SaveIndex", "Saved index as %v", sid.Str())

	return sid, nil
}
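// A minimal sketch (assumed flow, not the original code) of where SaveIndex
// fits: once new packs have been written, the updated index is stored and the
// returned storage ID can be logged or referenced elsewhere.
func finishUploadExample(repo *Repository) error {
	id, err := repo.SaveIndex()
	if err != nil {
		return err
	}

	debug.Log("example.finishUpload", "index stored as %v", id.Str())
	return nil
}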
// AddKey adds a new key to an already existing repository.
func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) {
	// fill meta data about key
	newkey := &Key{
		Created: time.Now(),
		KDF:     "scrypt",
		N:       scryptN,
		R:       scryptR,
		P:       scryptP,
	}

	hn, err := os.Hostname()
	if err == nil {
		newkey.Hostname = hn
	}

	usr, err := user.Current()
	if err == nil {
		newkey.Username = usr.Username
	}

	// generate random salt
	newkey.Salt = make([]byte, scryptSaltsize)
	n, err := rand.Read(newkey.Salt)
	if n != scryptSaltsize || err != nil {
		panic("unable to read enough random bytes for salt")
	}

	// call KDF to derive user key
	newkey.user, err = crypto.KDF(newkey.N, newkey.R, newkey.P, newkey.Salt, password)
	if err != nil {
		return nil, err
	}

	if template == nil {
		// generate new random master keys
		newkey.master = crypto.NewRandomKey()
	} else {
		// copy master keys from old key
		newkey.master = template
	}

	// encrypt master keys (as json) with user key
	buf, err := json.Marshal(newkey.master)
	if err != nil {
		return nil, err
	}

	newkey.Data, err = crypto.Encrypt(newkey.user, nil, buf)
	if err != nil {
		return nil, err
	}

	// dump as json
	buf, err = json.Marshal(newkey)
	if err != nil {
		return nil, err
	}

	// store in repository and return
	blob, err := s.be.Create()
	if err != nil {
		return nil, err
	}

	plainhw := backend.NewHashingWriter(blob, sha256.New())

	_, err = plainhw.Write(buf)
	if err != nil {
		return nil, err
	}

	name := backend.ID(plainhw.Sum(nil)).String()

	err = blob.Finalize(backend.Key, name)
	if err != nil {
		return nil, err
	}

	newkey.name = name

	return newkey, nil
}
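// A usage sketch for AddKey, not part of the original source: it adds a
// second password to an open repository by passing the currently loaded
// master key (s.key, as used by the Save functions above) as the template, so
// the new key unlocks the same data. Passing nil instead would generate a
// fresh master key. The password would normally come from a user prompt.
func addSecondPasswordExample(s *Repository, newPassword string) error {
	newkey, err := AddKey(s, newPassword, s.key)
	if err != nil {
		return err
	}

	debug.Log("example.addSecondPassword", "new key stored as %v", newkey.name)
	return nil
}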
// map2id converts a mapID into a backend.ID.
func map2id(id mapID) backend.ID {
	return backend.ID(id[:])
}