Example #1
// DecodeIndex loads and deserializes an index from rd.
func DecodeIndex(rd io.Reader) (idx *Index, err error) {
	debug.Log("Index.DecodeIndex", "Start decoding index")
	idxJSON := jsonIndex{}

	dec := json.NewDecoder(rd)
	err = dec.Decode(&idxJSON)
	if err != nil {
		debug.Log("Index.DecodeIndex", "Error %v", err)

		if isErrOldIndex(err) {
			debug.Log("Index.DecodeIndex", "index is probably old format, trying that")
			err = ErrOldIndexFormat
		}

		return nil, err
	}

	idx = NewIndex()
	for _, pack := range idxJSON.Packs {
		for _, blob := range pack.Blobs {
			idx.store(PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				Offset: blob.Offset,
				Length: blob.Length,
				PackID: pack.ID,
			})
		}
	}
	idx.supersedes = idxJSON.Supersedes
	idx.final = true

	debug.Log("Index.DecodeIndex", "done")
	return idx, nil
}
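A minimal sketch of how a caller might use the ErrOldIndexFormat sentinel to fall back to the old-format decoder (DecodeOldIndex, shown in a later example). loadAnyIndex is a hypothetical helper, and the reader is assumed to be seekable so the data can be decoded a second time:

func loadAnyIndex(rd io.ReadSeeker) (*Index, error) {
	idx, err := DecodeIndex(rd)
	if err != ErrOldIndexFormat {
		return idx, err
	}

	// rewind and retry with the old-format decoder
	if _, err := rd.Seek(0, io.SeekStart); err != nil {
		return nil, err
	}
	return DecodeOldIndex(rd)
}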
Example #2
func (res *Restorer) restoreNodeTo(node *Node, dir string, dst string) error {
	debug.Log("Restorer.restoreNodeTo", "node %v, dir %v, dst %v", node.Name, dir, dst)
	dstPath := filepath.Join(dst, dir, node.Name)

	err := node.CreateAt(dstPath, res.repo)
	if err != nil {
		debug.Log("Restorer.restoreNodeTo", "node.CreateAt(%s) error %v", dstPath, err)
	}

	// Did it fail because of ENOENT?
	if err != nil && os.IsNotExist(errors.Cause(err)) {
		debug.Log("Restorer.restoreNodeTo", "create intermediate paths")

		// Create parent directories and retry
		err = os.MkdirAll(filepath.Dir(dstPath), 0700)
		if err == nil || os.IsExist(errors.Cause(err)) {
			err = node.CreateAt(dstPath, res.repo)
		}
	}

	if err != nil {
		debug.Log("Restorer.restoreNodeTo", "error %v", err)
		err = res.Error(dstPath, node, errors.Annotate(err, "create node"))
		if err != nil {
			return err
		}
	}

	debug.Log("Restorer.restoreNodeTo", "successfully restored %v", node.Name)

	return nil
}
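The create-then-retry shape above is generic: attempt the operation, and only on ENOENT create the missing parent directories and try once more. A standalone sketch of the same pattern for a plain file write; writeFileMkdirAll is a hypothetical name:

func writeFileMkdirAll(path string, data []byte) error {
	err := ioutil.WriteFile(path, data, 0600)
	if err != nil && os.IsNotExist(err) {
		// create the missing parent directories and retry once
		if err = os.MkdirAll(filepath.Dir(path), 0700); err != nil {
			return err
		}
		err = ioutil.WriteFile(path, data, 0600)
	}
	return err
}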
Example #3
// loadIndex loads the index id and merges it with the currently used index.
func (s *Repository) loadIndex(id string) error {
	debug.Log("Repo.loadIndex", "Loading index %v", id[:8])
	before := len(s.idx.pack)

	rd, err := s.be.Get(backend.Index, id)
	if err != nil {
		return err
	}
	defer rd.Close()

	// decrypt
	decryptRd, err := crypto.DecryptFrom(s.key, rd)
	if err != nil {
		return err
	}
	defer decryptRd.Close()

	idx, err := DecodeIndex(decryptRd)
	if err != nil {
		debug.Log("Repo.loadIndex", "error while decoding index %v: %v", id, err)
		return err
	}

	s.idx.Merge(idx)

	after := len(s.idx.pack)
	debug.Log("Repo.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)

	return nil
}
Example #4
// savePacker stores p in the backend.
func (s *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	_, err := p.Finalize()
	if err != nil {
		return err
	}

	// move file to the final location
	sid := p.ID()
	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
	if err != nil {
		debug.Log("Repo.savePacker", "blob Finalize() error: %v", err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", sid.Str())

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
		s.idx.Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
	}

	return nil
}
Example #5
// Stale returns true if the lock is stale. A lock is stale if the timestamp is
// older than 30 minutes or if it was created on the current machine and the
// process isn't alive any more.
func (l *Lock) Stale() bool {
	debug.Log("Lock.Stale", "testing if lock %v for process %d is stale", l.lockID.Str(), l.PID)
	if time.Since(l.Time) > staleTimeout {
		debug.Log("Lock.Stale", "lock is stale, timestamp is too old: %v\n", l.Time)
		return true
	}

	hn, err := os.Hostname()
	if err != nil {
		debug.Log("Lock.Stale", "unable to find current hostnanme: %v", err)
		// since we cannot find the current hostname, assume that the lock is
		// not stale.
		return false
	}

	if hn != l.Hostname {
		// lock was created on a different host, assume the lock is not stale.
		return false
	}

	// check if we can reach the process retaining the lock
	exists := l.processExists()
	if !exists {
		debug.Log("Lock.Stale", "could not reach process, %d, lock is probably stale\n", l.PID)
		return true
	}

	debug.Log("Lock.Stale", "lock not stale\n")
	return false
}
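processExists is not shown here. On Unix it could be implemented by probing the process with signal 0, which checks for existence without delivering a signal; this sketch is an assumption about the helper, not necessarily restic's actual implementation:

func processExists(pid int) bool {
	proc, err := os.FindProcess(pid)
	if err != nil {
		return false
	}
	// on Unix, FindProcess always succeeds, so probe with signal 0
	return proc.Signal(syscall.Signal(0)) == nil
}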
Example #6
// Walk starts walking the tree given by id. When the channel done is closed,
// processing stops.
func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
	debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
	defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)

	resCh := make(chan loadTreeResult, 1)
	tw.ch <- loadTreeJob{
		id:  id,
		res: resCh,
	}

	res := <-resCh
	if res.err != nil {
		select {
		case tw.out <- WalkTreeJob{Path: path, Error: res.err}:
		case <-done:
			return
		}
		return
	}

	tw.walk(path, res.tree, done)

	select {
	case tw.out <- WalkTreeJob{Path: path, Tree: res.tree}:
	case <-done:
		return
	}
}
Example #7
// DecodeOldIndex loads and deserializes an index in the old format from rd.
func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
	debug.Log("Index.DecodeOldIndex", "Start decoding old index")
	list := []*packJSON{}

	dec := json.NewDecoder(rd)
	err = dec.Decode(&list)
	if err != nil {
		debug.Log("Index.DecodeOldIndex", "Error %#v", err)
		return nil, err
	}

	idx = NewIndex()
	for _, pack := range list {
		for _, blob := range pack.Blobs {
			idx.store(PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: pack.ID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}
	}
	idx.final = true

	debug.Log("Index.DecodeOldIndex", "done")
	return idx, nil
}
Example #8
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	data, err := p.Finalize()
	if err != nil {
		return err
	}

	id := backend.Hash(data)
	h := backend.Handle{Type: backend.Data, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", h)

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Current().Store(PackedBlob{
			Type:   b.Type,
			ID:     b.ID,
			PackID: id,
			Offset: b.Offset,
			Length: uint(b.Length),
		})
	}

	return nil
}
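Note that the pack file is content-addressed: its name is the hash of its bytes. A minimal sketch of such a naming scheme using the standard library, assuming backend.Hash computes a SHA-256 digest; the function name is illustrative:

func contentAddress(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}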
Example #9
func refreshLocks(wg *sync.WaitGroup, done <-chan struct{}) {
	debug.Log("main.refreshLocks", "start")
	defer func() {
		wg.Done()
		globalLocks.Lock()
		globalLocks.cancelRefresh = nil
		globalLocks.Unlock()
	}()

	ticker := time.NewTicker(refreshInterval)
	defer ticker.Stop()

	for {
		select {
		case <-done:
			debug.Log("main.refreshLocks", "terminate")
			return
		case <-ticker.C:
			debug.Log("main.refreshLocks", "refreshing locks")
			globalLocks.Lock()
			for _, lock := range globalLocks.locks {
				err := lock.Refresh()
				if err != nil {
					fmt.Fprintf(os.Stderr, "unable to refresh lock: %v\n", err)
				}
			}
			globalLocks.Unlock()
		}
	}
}
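The loop above is an instance of the ticker-plus-done pattern: perform work at a fixed interval until a channel is closed. A minimal generic sketch; periodically and work are illustrative names:

func periodically(interval time.Duration, done <-chan struct{}, work func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			work()
		}
	}
}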
Example #10
// Save stores data in the backend at the handle.
func (be s3) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	debug.Log("s3.Save", "%v bytes at %d", len(p), h)

	path := s3path(h.Type, h.Name)

	// Check key does not already exist
	_, err = be.client.StatObject(be.bucketname, path)
	if err == nil {
		debug.Log("s3.blob.Finalize()", "%v already exists", h)
		return errors.New("key already exists")
	}

	<-be.connChan
	defer func() {
		be.connChan <- struct{}{}
	}()

	debug.Log("s3.Save", "PutObject(%v, %v, %v, %v)",
		be.bucketname, path, int64(len(p)), "binary/octet-stream")
	n, err := be.client.PutObject(be.bucketname, path, bytes.NewReader(p), "binary/octet-stream")
	debug.Log("s3.Save", "%v -> %v bytes, err %#v", path, n, err)

	return err
}
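The connChan field acts as a counting semaphore: taking a token before PutObject bounds the number of concurrent uploads. A sketch of how such a channel might be initialized; connLimit is an assumed constant:

func newConnChan(connLimit int) chan struct{} {
	ch := make(chan struct{}, connLimit)
	for i := 0; i < connLimit; i++ {
		ch <- struct{}{}
	}
	return ch
}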
Example #11
// repackBlob loads a single blob from src and saves it in dst.
func repackBlob(src, dst *repository.Repository, id backend.ID) error {
	blob, err := src.Index().Lookup(id)
	if err != nil {
		return err
	}

	debug.Log("RepackBlobs", "repacking blob %v, len %v", id.Str(), blob.PlaintextLength())

	buf := make([]byte, 0, blob.PlaintextLength())
	buf, err = src.LoadBlob(blob.Type, id, buf)
	if err != nil {
		return err
	}

	if uint(len(buf)) != blob.PlaintextLength() {
		debug.Log("RepackBlobs", "repack blob %v: len(buf) isn't equal to length: %v = %v", id.Str(), len(buf), blob.PlaintextLength())
		return errors.New("LoadBlob returned wrong data, len() doesn't match")
	}

	_, err = dst.SaveAndEncrypt(blob.Type, buf, &id)
	if err != nil {
		return err
	}

	return nil
}
Example #12
// Save stores data in the backend at the handle.
func (r *SFTP) Save(h backend.Handle, p []byte) (err error) {
	if err := h.Valid(); err != nil {
		return err
	}

	filename, tmpfile, err := r.tempFile()
	if err != nil {
		return err
	}
	debug.Log("sftp.Save", "save %v (%d bytes) to %v", h, len(p), filename)

	n, err := tmpfile.Write(p)
	if err != nil {
		return err
	}

	if n != len(p) {
		return errors.New("not all bytes writen")
	}

	err = tmpfile.Close()
	if err != nil {
		return err
	}

	err = r.renameFile(filename, h.Type, h.Name)
	debug.Log("sftp.Save", "save %v: rename %v: %v",
		h, filepath.Base(filename), err)
	if err != nil {
		return fmt.Errorf("sftp: renameFile: %v", err)
	}

	return nil
}
Example #13
func packIDTester(repo *repository.Repository, inChan <-chan backend.ID, errChan chan<- error, wg *sync.WaitGroup, done <-chan struct{}) {
	debug.Log("Checker.testPackID", "worker start")
	defer debug.Log("Checker.testPackID", "worker done")

	defer wg.Done()

	for id := range inChan {
		ok, err := repo.Backend().Test(backend.Data, id.String())
		if err != nil {
			err = PackError{ID: id, Err: err}
		} else if !ok {
			err = PackError{ID: id, Err: errors.New("does not exist")}
		}

		if err != nil {
			debug.Log("Checker.testPackID", "error checking for pack %s: %v", id.Str(), err)
			select {
			case <-done:
				return
			case errChan <- err:
			}

			continue
		}

		debug.Log("Checker.testPackID", "pack %s exists", id.Str())
	}
}
Example #14
// create initializes the backend specified by the URI.
func create(s string) (backend.Backend, error) {
	debug.Log("open", "parsing location %v", s)
	loc, err := location.Parse(s)
	if err != nil {
		return nil, err
	}

	switch loc.Scheme {
	case "local":
		debug.Log("open", "create local repository at %#v", loc.Config)
		return local.Create(loc.Config.(string))
	case "sftp":
		debug.Log("open", "create sftp repository at %#v", loc.Config)
		return sftp.CreateWithConfig(loc.Config.(sftp.Config))
	case "s3":
		cfg := loc.Config.(s3.Config)
		if cfg.KeyID == "" {
			cfg.KeyID = os.Getenv("AWS_ACCESS_KEY_ID")

		}
		if cfg.Secret == "" {
			cfg.Secret = os.Getenv("AWS_SECRET_ACCESS_KEY")
		}

		debug.Log("open", "create s3 repository at %#v", loc.Config)
		return s3.Open(cfg)
	}

	debug.Log("open", "invalid repository scheme: %v", s)
	return nil, fmt.Errorf("invalid scheme %q", loc.Scheme)
}
Example #15
func walkTree(repo *repository.Repository, path string, treeID backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("walkTree", "start on %q (%v)", path, treeID.Str())

	t, err := LoadTree(repo, treeID)
	if err != nil {
		select {
		case jobCh <- WalkTreeJob{Path: path, Error: err}:
		case <-done:
			return
		}
		return
	}

	for _, node := range t.Nodes {
		p := filepath.Join(path, node.Name)
		if node.Type == "dir" {
			walkTree(repo, p, *node.Subtree, done, jobCh)
		} else {
			select {
			case jobCh <- WalkTreeJob{Path: p, Node: node}:
			case <-done:
				return
			}
		}
	}

	select {
	case jobCh <- WalkTreeJob{Path: path, Tree: t}:
	case <-done:
		return
	}

	debug.Log("walkTree", "done for %q (%v)", path, treeID.Str())
}
Example #16
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker(size uint) (*pack.Packer, error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packs) > 0 {
		debug.Log("Repo.findPacker", "searching packer for %d bytes\n", size)
		for i, p := range r.packs {
			if p.Size()+size < maxPackSize {
				debug.Log("Repo.findPacker", "found packer %v", p)
				// remove from list
				r.packs = append(r.packs[:i], r.packs[i+1:]...)
				return p, nil
			}
		}
	}

	// no suitable packer found, return new
	blob, err := r.be.Create()
	if err != nil {
		return nil, err
	}
	debug.Log("Repo.findPacker", "create new pack %p for %d bytes", blob, size)
	return pack.NewPacker(r.key, blob), nil
}
Example #17
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	_, err := p.Finalize()
	if err != nil {
		return err
	}

	// move file to the final location
	sid := p.ID()
	err = p.Writer().(backend.Blob).Finalize(backend.Data, sid.String())
	if err != nil {
		debug.Log("Repo.savePacker", "blob Finalize() error: %v", err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", sid.Str())

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), sid.Str())
		r.idx.Current().Store(PackedBlob{
			Type:   b.Type,
			ID:     b.ID,
			PackID: sid,
			Offset: b.Offset,
			Length: uint(b.Length),
		})
		r.idx.RemoveFromInFlight(b.ID)
	}

	return nil
}
Example #18
0
func (node *Node) isNewer(path string, fi os.FileInfo) bool {
	if node.Type != "file" {
		debug.Log("node.isNewer", "node %v is newer: not file", path)
		return true
	}

	tpe := nodeTypeFromFileInfo(fi)
	if node.Name != fi.Name() || node.Type != tpe {
		debug.Log("node.isNewer", "node %v is newer: name or type changed", path)
		return true
	}

	extendedStat := fi.Sys().(*syscall.Stat_t)
	inode := extendedStat.Ino
	size := uint64(extendedStat.Size)

	if node.ModTime != fi.ModTime() ||
		node.ChangeTime != changeTime(extendedStat) ||
		node.Inode != uint64(inode) ||
		node.Size != size {
		debug.Log("node.isNewer", "node %v is newer: timestamp, size or inode changed", path)
		return true
	}

	debug.Log("node.isNewer", "node %v is not newer", path)
	return false
}
Example #19
// WalkTree walks the tree specified by id recursively and sends a job for each
// file and directory it finds. When the channel done is closed, processing
// stops.
func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
	debug.Log("WalkTree", "start on %v, start workers", id.Str())

	load := func(id backend.ID) (*Tree, error) {
		tree := &Tree{}
		err := repo.LoadJSONPack(pack.Tree, id, tree)
		if err != nil {
			return nil, err
		}
		return tree, nil
	}

	ch := make(chan loadTreeJob)

	var wg sync.WaitGroup
	for i := 0; i < loadTreeWorkers; i++ {
		wg.Add(1)
		go loadTreeWorker(&wg, ch, load, done)
	}

	tw := NewTreeWalker(ch, jobCh)
	tw.Walk("", id, done)
	close(jobCh)

	close(ch)
	wg.Wait()

	debug.Log("WalkTree", "done")
}
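WalkTree uses the usual fan-out shape: start a fixed number of workers on a shared job channel, close the channel when no more jobs will be sent, then wait for the workers to drain it. A generic sketch of that shape with illustrative names:

func runWorkers(n int, jobs <-chan func(), wg *sync.WaitGroup) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobs {
				job()
			}
		}()
	}
}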
Example #20
func (j archiveJob) Copy() pipe.Job {
	if !j.hasOld {
		return j.new
	}

	// handle files
	if isRegularFile(j.new.Info()) {
		debug.Log("archiveJob.Copy", "   job %v is file", j.new.Path())

		// if type has changed, return new job directly
		if j.old.Node == nil {
			return j.new
		}

		// if file is newer, return the new job
		if j.old.Node.isNewer(j.new.Fullpath(), j.new.Info()) {
			debug.Log("archiveJob.Copy", "   job %v is newer", j.new.Path())
			return j.new
		}

		debug.Log("archiveJob.Copy", "   job %v add old data", j.new.Path())
		// otherwise annotate job with old data
		e := j.new.(pipe.Entry)
		e.Node = j.old.Node
		return e
	}

	// dirs and other types are just returned
	return j.new
}
Example #21
// DecodeIndex loads and deserializes an index from rd.
func DecodeIndex(rd io.Reader) (*Index, error) {
	debug.Log("Index.DecodeIndex", "Start decoding index")
	list := []*packJSON{}

	dec := json.NewDecoder(rd)
	err := dec.Decode(&list)
	if err != nil {
		return nil, err
	}

	idx := NewIndex()
	for _, pack := range list {
		packID, err := backend.ParseID(pack.ID)
		if err != nil {
			debug.Log("Index.DecodeIndex", "error parsing pack ID %q: %v", pack.ID, err)
			return nil, err
		}

		for _, blob := range pack.Blobs {
			blobID, err := backend.ParseID(blob.ID)
			if err != nil {
				debug.Log("Index.DecodeIndex", "error parsing blob ID %q: %v", blob.ID, err)
				return nil, err
			}

			idx.store(blob.Type, blobID, packID, blob.Offset, blob.Length, true)
		}
	}

	debug.Log("Index.DecodeIndex", "done")
	return idx, nil
}
Example #22
// Dump writes the pretty-printed JSON representation of the index to w.
func (idx *Index) Dump(w io.Writer) error {
	debug.Log("Index.Dump", "dumping index")
	idx.m.Lock()
	defer idx.m.Unlock()

	list, err := idx.generatePackList()
	if err != nil {
		return err
	}

	outer := jsonIndex{
		Supersedes: idx.Supersedes(),
		Packs:      list,
	}

	buf, err := json.MarshalIndent(outer, "", "  ")
	if err != nil {
		return err
	}

	_, err = w.Write(append(buf, '\n'))
	if err != nil {
		return err
	}

	debug.Log("Index.Dump", "done")

	return nil
}
Example #23
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

	rd, err := r.be.Get(t, id.String())
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	// decrypt
	plain, err := r.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	return plain, nil
}
Example #24
// checkTreeWorker checks the trees received and sends out errors to errChan.
func (c *Checker) checkTreeWorker(in <-chan treeJob, out chan<- TreeError, done <-chan struct{}, wg *sync.WaitGroup) {
	defer func() {
		debug.Log("checker.checkTreeWorker", "exiting")
		wg.Done()
	}()

	var (
		inCh      = in
		outCh     chan<- TreeError // nil disables the send case until an error is ready
		treeError TreeError
	)

	for {
		select {
		case <-done:
			return

		case job, ok := <-inCh:
			if !ok {
				return
			}

			id := job.ID
			alreadyChecked := false
			c.blobRefs.Lock()
			if c.blobRefs.M[id] > 0 {
				alreadyChecked = true
			}
			c.blobRefs.M[id]++
			debug.Log("checker.checkTreeWorker", "tree %v refcount %d", job.ID.Str(), c.blobRefs.M[id])
			c.blobRefs.Unlock()

			if alreadyChecked {
				continue
			}

			debug.Log("checker.checkTreeWorker", "load tree %v", job.ID.Str())

			errs := c.checkTree(job.ID, job.Tree)
			if len(errs) > 0 {
				debug.Log("checker.checkTreeWorker", "checked tree %v: %v errors", job.ID.Str(), len(errs))
				treeError = TreeError{ID: job.ID, Errors: errs}
				outCh = out
				inCh = nil
			}

		case outCh <- treeError:
			debug.Log("checker.checkTreeWorker", "tree %v: sent %d errors", treeError.ID, len(treeError.Errors))
			outCh = nil
			inCh = in
		}
	}
}
Example #25
// Scan traverses the dirs to collect Stat information while emitting progress
// information with p.
func Scan(dirs []string, filter pipe.SelectFunc, p *Progress) (Stat, error) {
	p.Start()
	defer p.Done()

	var stat Stat

	for _, dir := range dirs {
		debug.Log("Scan", "Start for %v", dir)
		err := filepath.Walk(dir, func(str string, fi os.FileInfo, err error) error {
			debug.Log("Scan.Walk", "%v, fi: %v, err: %v", str, fi, err)
			// TODO: integrate error reporting
			if err != nil {
				fmt.Fprintf(os.Stderr, "error for %v: %v\n", str, err)
				return nil
			}
			if fi == nil {
				fmt.Fprintf(os.Stderr, "error for %v: FileInfo is nil\n", str)
				return nil
			}

			if !filter(str, fi) {
				debug.Log("Scan.Walk", "path %v excluded", str)
				if fi.IsDir() {
					return filepath.SkipDir
				}
				return nil
			}

			s := Stat{}
			if fi.IsDir() {
				s.Dirs++
			} else {
				s.Files++

				if isRegularFile(fi) {
					s.Bytes += uint64(fi.Size())
				}
			}

			p.Report(s)
			stat.Add(s)

			// TODO: handle error?
			return nil
		})

		debug.Log("Scan", "Done for %v, err: %v", dir, err)
		if err != nil {
			return Stat{}, err
		}
	}

	return stat, nil
}
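The filter parameter decides which paths are scanned; returning false for a directory prunes the whole subtree via filepath.SkipDir. A sketch of a matching SelectFunc-style filter that excludes dotfiles; the function name is illustrative:

func excludeDotfiles(path string, fi os.FileInfo) bool {
	return !strings.HasPrefix(filepath.Base(path), ".")
}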
Example #26
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack in the backend. The result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	blob, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	plaintextBufSize := uint(cap(plaintextBuf))
	if blob.PlaintextLength() > plaintextBufSize {
		return nil, fmt.Errorf("buf is too small, need %d more bytes", blob.PlaintextLength()-plaintextBufSize)
	}

	if blob.Type != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)

	// load blob from pack
	rd, err := r.be.GetReader(backend.Data, blob.PackID.String(), blob.Offset, blob.Length)
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
		return nil, err
	}

	// make buffer that is large enough for the complete blob
	ciphertextBuf := make([]byte, blob.Length)
	_, err = io.ReadFull(rd, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
Example #27
// Split feeds all elements read from inChan to dirChan and entChan.
func Split(inChan <-chan Job, dirChan chan<- Dir, entChan chan<- Entry) {
	debug.Log("pipe.Split", "start")
	defer debug.Log("pipe.Split", "done")

	inCh := inChan
	dirCh := dirChan
	entCh := entChan

	var (
		dir Dir
		ent Entry
	)

	// deactivate sending until we have received at least one job
	dirCh = nil
	entCh = nil
	for {
		select {
		case job, ok := <-inCh:
			if !ok {
				// channel is closed
				return
			}

			if job == nil {
				panic("nil job received")
			}

			// disable receiving until the current job has been sent
			inCh = nil

			switch j := job.(type) {
			case Dir:
				dir = j
				dirCh = dirChan
			case Entry:
				ent = j
				entCh = entChan
			default:
				panic(fmt.Sprintf("unknown job type %v", j))
			}
		case dirCh <- dir:
			// disable sending, re-enable receiving
			dirCh = nil
			inCh = inChan
		case entCh <- ent:
			// disable sending, re-enable receiving
			entCh = nil
			inCh = inChan
		}
	}
}
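Split relies on a property of select: operations on a nil channel block forever, so setting a case's channel to nil disables that case. A minimal standalone demonstration that forwards one value at a time; the names are illustrative:

func forward(in <-chan int, out chan<- int) {
	inCh := in
	var (
		outCh chan<- int // nil: sending disabled until a value is held
		v     int
	)

	for {
		select {
		case x, ok := <-inCh:
			if !ok {
				return
			}
			v = x
			inCh = nil // stop receiving until v has been sent
			outCh = out
		case outCh <- v:
			outCh = nil // value sent, resume receiving
			inCh = in
		}
	}
}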
Example #28
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack in the backend. The result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	packID, tpe, offset, length, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	if length > uint(cap(plaintextBuf))+crypto.Extension {
		return nil, fmt.Errorf("buf is too small, need %d more bytes", length-uint(cap(plaintextBuf))-crypto.Extension)
	}

	if tpe != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, tpe)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", tpe, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)

	// load blob from pack
	rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
		return nil, err
	}

	// make buffer that is large enough for the complete blob
	ciphertextBuf := make([]byte, length)
	_, err = io.ReadFull(rd, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	err = rd.Close()
	if err != nil {
		return nil, err
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
Example #29
// Lookup returns the pack, blob type, offset, and length for the blob with the given id.
func (idx *Index) Lookup(id backend.ID) (packID backend.ID, tpe pack.BlobType, offset, length uint, err error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if p, ok := idx.pack[id.String()]; ok {
		debug.Log("Index.Lookup", "id %v found in pack %v at %d, length %d",
			id.Str(), p.packID.Str(), p.offset, p.length)
		return p.packID, p.tpe, p.offset, p.length, nil
	}

	debug.Log("Index.Lookup", "id %v not found", id.Str())
	return nil, pack.Data, 0, 0, fmt.Errorf("id %v not found in index", id)
}
Example #30
// SaveAndEncrypt encrypts data and stores it in the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := backend.Hash(data)
		id = &hashedID
	}

	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return backend.ID{}, err
	}

	// add this id to the list of in-flight chunk ids.
	debug.Log("Repo.Save", "add %v to list of in-flight IDs", id.Str())
	err = r.idx.AddInFlight(*id)
	if err != nil {
		debug.Log("Repo.Save", "another goroutine is already working on %v (%v) does already exist", id.Str, t)
		return *id, nil
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		r.idx.RemoveFromInFlight(*id)
		return backend.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, bytes.NewReader(ciphertext))
	if err != nil {
		return backend.ID{}, err
	}

	// if the pack is not full enough and there are fewer than maxPackers
	// packers, put it back into the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}
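The in-flight list prevents two goroutines from saving the same blob concurrently. A sketch of the shape such a set might have, keyed on the ID's string form; this is an assumption about r.idx, not restic's actual implementation:

type inFlight struct {
	mu  sync.Mutex
	ids map[string]struct{}
}

func newInFlight() *inFlight {
	return &inFlight{ids: make(map[string]struct{})}
}

// AddInFlight registers id, failing if another goroutine already did.
func (f *inFlight) AddInFlight(id backend.ID) error {
	f.mu.Lock()
	defer f.mu.Unlock()
	if _, ok := f.ids[id.String()]; ok {
		return fmt.Errorf("%v already in flight", id.Str())
	}
	f.ids[id.String()] = struct{}{}
	return nil
}

// RemoveFromInFlight deregisters id once it has been stored.
func (f *inFlight) RemoveFromInFlight(id backend.ID) {
	f.mu.Lock()
	defer f.mu.Unlock()
	delete(f.ids, id.String())
}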