Example 1
func TestSave(t *testing.T) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rand.Reader, data)
		OK(t, err)

		id := backend.Hash(data)

		// save
		sid, err := repo.SaveAndEncrypt(pack.Data, data, nil)
		OK(t, err)

		Equals(t, id, sid)

		OK(t, repo.Flush())
		// OK(t, repo.SaveIndex())

		// read back
		buf, err := repo.LoadBlob(pack.Data, id, make([]byte, size))
		OK(t, err)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}
Example 2
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	data, err := p.Finalize()
	if err != nil {
		return err
	}

	id := backend.Hash(data)
	h := backend.Handle{Type: backend.Data, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", h)

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Current().Store(PackedBlob{
			Type:   b.Type,
			ID:     b.ID,
			PackID: id,
			Offset: b.Offset,
			Length: uint(b.Length),
		})
	}

	return nil
}
Example 3
func saveFile(t testing.TB, be Saver, filename string, n int) {
	f, err := os.Open(filename)
	if err != nil {
		t.Fatal(err)
	}

	data := make([]byte, n)
	m, err := io.ReadFull(f, data)

	if m != n {
		t.Fatalf("read wrong number of bytes from %v: want %v, got %v", filename, m, n)
	}

	if err = f.Close(); err != nil {
		t.Fatal(err)
	}

	h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()}

	err = be.Save(h, data)
	if err != nil {
		t.Fatal(err)
	}

	err = os.Remove(filename)
	if err != nil {
		t.Fatal(err)
	}
}
Example 4
func TestLoadLargeBuffer(t *testing.T) {
	b := mem.New()

	for i := 0; i < 20; i++ {
		data := Random(23+i, rand.Intn(MiB)+500*KiB)

		id := backend.Hash(data)
		err := b.Save(backend.Handle{Name: id.String(), Type: backend.Data}, data)
		OK(t, err)

		buf := make([]byte, len(data)+100)
		buf, err = backend.LoadAll(b, backend.Handle{Type: backend.Data, Name: id.String()}, buf)
		OK(t, err)

		if len(buf) != len(data) {
			t.Errorf("length of returned buffer does not match, want %d, got %d", len(data), len(buf))
			continue
		}

		if !bytes.Equal(buf, data) {
			t.Errorf("wrong data returned")
			continue
		}
	}
}
Example 5
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := backend.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
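checkPack verifies a single pack file; a caller typically enumerates all data packs and checks each one in turn. A minimal sketch of that loop, assuming List behaves as in Example 22 (checkAllPacks itself is a hypothetical helper):

// checkAllPacks runs checkPack over every data pack in the repository
// and collects the failures. Sketch only; List is used as in Example 22.
func checkAllPacks(r *repository.Repository) []error {
	done := make(chan struct{})
	defer close(done)

	var errs []error
	for id := range r.List(backend.Data, done) {
		if err := checkPack(r, id); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}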
Example 6
// savePacker stores p in the backend.
func (r *Repository) savePacker(p *pack.Packer) error {
	debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
	n, err := p.Finalize()
	if err != nil {
		return err
	}

	tmpfile := p.Writer().(*os.File)
	f, err := os.Open(tmpfile.Name())
	if err != nil {
		return err
	}

	data := make([]byte, n)
	m, err := io.ReadFull(f, data)

	if uint(m) != n {
		return fmt.Errorf("read wrong number of bytes from %v: want %v, got %v", tmpfile.Name(), n, m)
	}

	if err = f.Close(); err != nil {
		return err
	}

	id := backend.Hash(data)
	h := backend.Handle{Type: backend.Data, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
		debug.Log("Repo.savePacker", "Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("Repo.savePacker", "saved as %v", h)

	err = os.Remove(tmpfile.Name())
	if err != nil {
		return err
	}

	// update blobs in the index
	for _, b := range p.Blobs() {
		debug.Log("Repo.savePacker", "  updating blob %v to pack %v", b.ID.Str(), id.Str())
		r.idx.Current().Store(PackedBlob{
			Type:   b.Type,
			ID:     b.ID,
			PackID: id,
			Offset: b.Offset,
			Length: uint(b.Length),
		})
	}

	return nil
}
Example 7
func genTestContent() map[backend.ID][]byte {
	m := make(map[backend.ID][]byte)

	for _, length := range testContentLengths {
		buf := Random(int(length), int(length))
		id := backend.Hash(buf)
		m[id] = buf
		testMaxFileSize += length
	}

	return m
}
Example 8
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
	// lookup pack
	blob, err := r.idx.Lookup(id)
	if err != nil {
		debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
		return nil, err
	}

	plaintextBufSize := uint(cap(plaintextBuf))
	if blob.PlaintextLength() > plaintextBufSize {
		debug.Log("Repo.LoadBlob", "need to expand buffer: want %d bytes, got %d",
			blob.PlaintextLength(), plaintextBufSize)
		plaintextBuf = make([]byte, blob.PlaintextLength())
	}

	if blob.Type != t {
		debug.Log("Repo.LoadBlob", "wrong type returned for %v: wanted %v, got %v", id.Str(), t, blob.Type)
		return nil, fmt.Errorf("blob has wrong type %v (wanted: %v)", blob.Type, t)
	}

	debug.Log("Repo.LoadBlob", "id %v found: %v", id.Str(), blob)

	// load blob from pack
	h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
	ciphertextBuf := make([]byte, blob.Length)
	n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
	if err != nil {
		debug.Log("Repo.LoadBlob", "error loading blob %v: %v", blob, err)
		return nil, err
	}

	if uint(n) != blob.Length {
		debug.Log("Repo.LoadBlob", "error loading blob %v: wrong length returned, want %d, got %d",
			blob.Length, uint(n))
		return nil, errors.New("wrong length returned")
	}

	// decrypt
	plaintextBuf, err = r.decryptTo(plaintextBuf, ciphertextBuf)
	if err != nil {
		return nil, err
	}

	// check hash
	if !backend.Hash(plaintextBuf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	return plaintextBuf, nil
}
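Since LoadBlob reallocates only when the supplied buffer's capacity is smaller than the blob's plaintext length, a caller can reuse one buffer across many blobs. A sketch of that pattern (loadBlobs and the process callback are hypothetical names):

// loadBlobs decrypts each blob into a single reused buffer, which LoadBlob
// grows only when a blob exceeds its capacity. Sketch only.
func loadBlobs(repo *repository.Repository, ids []backend.ID, process func([]byte)) error {
	var buf []byte
	for _, id := range ids {
		var err error
		buf, err = repo.LoadBlob(pack.Data, id, buf)
		if err != nil {
			return err
		}
		process(buf)
	}
	return nil
}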
Example 9
func TestUnpackReadSeeker(t *testing.T) {
	// create a random key
	k := crypto.NewRandomKey()

	bufs, packData, packSize := newPack(t, k)

	b := mem.New()
	id := backend.Hash(packData)

	handle := backend.Handle{Type: backend.Data, Name: id.String()}
	OK(t, b.Save(handle, packData))
	rd := backend.NewReadSeeker(b, handle)
	verifyBlobs(t, bufs, k, rd, packSize)
}
Example 10
func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
	defer freeBuf(chunk.Data)

	id := backend.Hash(chunk.Data)
	err := arch.Save(pack.Data, chunk.Data, id)
	// TODO handle error
	if err != nil {
		panic(err)
	}

	p.Report(Stat{Bytes: uint64(chunk.Length)})
	arch.blobToken <- token
	resultChannel <- saveResult{id: id, bytes: uint64(chunk.Length)}
}
Example 11
// saveTreeJSON stores a tree in the repository.
func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
	data, err := json.Marshal(item)
	if err != nil {
		return backend.ID{}, err
	}
	data = append(data, '\n')

	// check if tree has been saved before
	id := backend.Hash(data)
	if repo.Index().Has(id) {
		return id, nil
	}

	return repo.SaveJSON(pack.Tree, item)
}
Example 12
// SaveTreeJSON stores a tree in the repository.
func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
	data, err := json.Marshal(item)
	if err != nil {
		return backend.ID{}, err
	}
	data = append(data, '\n')

	// check if tree has been saved before
	id := backend.Hash(data)
	if arch.isKnownBlob(id) {
		return id, nil
	}

	return arch.repo.SaveJSON(pack.Tree, item)
}
Example 13
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) {
	ciphertext := make([]byte, len(p)+crypto.Extension)
	ciphertext, err = r.Encrypt(ciphertext, p)
	if err != nil {
		return backend.ID{}, err
	}

	id = backend.Hash(ciphertext)
	h := backend.Handle{Type: t, Name: id.String()}

	err = r.be.Save(h, ciphertext)
	if err != nil {
		debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err)
		return backend.ID{}, err
	}

	debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h)
	return id, nil
}
Example 14
func testParallelSaveWithDuplication(t *testing.T, seed int) {
	repo := SetupRepo()
	defer TeardownRepo(repo)

	dataSizeMb := 128
	duplication := 7

	arch := restic.NewArchiver(repo)
	chunks := getRandomData(seed, dataSizeMb*1024*1024)

	errChannels := [](<-chan error){}

	// interweaved processing of subsequent chunks
	maxParallel := 2*duplication - 1
	barrier := make(chan struct{}, maxParallel)

	for _, c := range chunks {
		for dupIdx := 0; dupIdx < duplication; dupIdx++ {
			errChan := make(chan error)
			errChannels = append(errChannels, errChan)

			go func(c chunker.Chunk, errChan chan<- error) {
				barrier <- struct{}{}

				id := backend.Hash(c.Data)
				time.Sleep(time.Duration(id[0]))
				err := arch.Save(pack.Data, c.Data, id)
				<-barrier
				errChan <- err
			}(c, errChan)
		}
	}

	for _, errChan := range errChannels {
		OK(t, <-errChan)
	}

	OK(t, repo.Flush())
	OK(t, repo.SaveIndex())

	chkr := createAndInitChecker(t, repo)
	assertNoUnreferencedPacks(t, chkr)
}
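The buffered barrier channel above acts as a counting semaphore: a send blocks once maxParallel goroutines hold a slot, and every receive frees one. The same pattern in isolation (runBounded is a hypothetical helper):

// runBounded executes the work functions with at most maxParallel of them
// running concurrently, using a buffered channel as a counting semaphore.
func runBounded(maxParallel int, work []func() error) []error {
	sem := make(chan struct{}, maxParallel)
	errs := make([]error, len(work))

	var wg sync.WaitGroup
	for i, fn := range work {
		wg.Add(1)
		go func(i int, fn func() error) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it when done
			errs[i] = fn()
		}(i, fn)
	}
	wg.Wait()
	return errs
}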
Example 15
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
	if id == nil {
		// compute plaintext hash
		hashedID := backend.Hash(data)
		id = &hashedID
	}

	debug.Log("Repo.Save", "save id %v (%v, %d bytes)", id.Str(), t, len(data))

	// get buf from the pool
	ciphertext := getBuf()
	defer freeBuf(ciphertext)

	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
		return backend.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
		return backend.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, ciphertext)
	if err != nil {
		return backend.ID{}, err
	}

	// if the pack is not full enough and there are less than maxPackers
	// packers, put back to the list
	if packer.Size() < minPackSize && r.countPacker() < maxPackers {
		debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
		r.insertPacker(packer)
		return *id, nil
	}

	// else write the pack to the backend
	return *id, r.savePacker(packer)
}
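Note that SaveAndEncrypt may leave a blob in a partially filled packer when packer.Size() < minPackSize, so the data only reaches the backend once the packer is flushed; this is why Examples 1 and 20 call repo.Flush() after saving. A sketch of that pattern (saveBlobs is a hypothetical helper):

// saveBlobs stores a batch of blobs, then flushes any still-open packers
// so the packs actually reach the backend. Sketch of the Save-then-Flush
// pattern from Examples 1 and 20.
func saveBlobs(repo *repository.Repository, blobs [][]byte) ([]backend.ID, error) {
	ids := make([]backend.ID, 0, len(blobs))
	for _, data := range blobs {
		id, err := repo.SaveAndEncrypt(pack.Data, data, nil)
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	if err := repo.Flush(); err != nil {
		return nil, err
	}
	return ids, nil
}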
Example 16
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

	h := backend.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(r.be, h, nil)
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

	if t != backend.Config && !backend.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}

	// decrypt
	plain, err := r.Decrypt(buf)
	if err != nil {
		return nil, err
	}

	return plain, nil
}
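LoadAndDecrypt is the counterpart of SaveUnpacked from Example 13: the ID is the hash of the stored ciphertext, so the check above recomputes the hash over the raw bytes before decrypting (and skips it for backend.Config, whose name is not derived from its content). A round-trip sketch (roundTripUnpacked is a hypothetical helper):

// roundTripUnpacked pairs SaveUnpacked (Example 13) with LoadAndDecrypt:
// save an encrypted, unpacked blob and read it straight back. Sketch only.
func roundTripUnpacked(repo *repository.Repository, payload []byte) ([]byte, error) {
	id, err := repo.SaveUnpacked(backend.Snapshot, payload)
	if err != nil {
		return nil, err
	}
	return repo.LoadAndDecrypt(backend.Snapshot, id)
}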
Example 17
func TestReadSeeker(t *testing.T) {
	b := mem.New()

	length := rand.Intn(1<<24) + 2000

	data := Random(23, length)
	id := backend.Hash(data)

	handle := backend.Handle{Type: backend.Data, Name: id.String()}
	err := b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		if rand.Float32() > 0.5 {
			o = -o
		}

		d := data
		if o > 0 && o < len(d) {
			d = d[o:]
		} else {
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		rd := backend.NewReadSeeker(b, handle)
		loadAndCompare(t, rd, len(data), int64(o), d)
	}
}
Example 18
// TestLoad tests the backend's Load function.
func TestLoad(t testing.TB) {
	b := open(t)
	defer close(t)

	_, err := b.Load(backend.Handle{}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for invalid handle")
	}

	_, err = b.Load(backend.Handle{Type: backend.Data, Name: "foobar"}, nil, 0)
	if err == nil {
		t.Fatalf("Load() did not return an error for non-existing blob")
	}

	length := rand.Intn(1<<24) + 2000

	data := Random(23, length)
	id := backend.Hash(data)

	handle := backend.Handle{Type: backend.Data, Name: id.String()}
	err = b.Save(handle, data)
	if err != nil {
		t.Fatalf("Save() error: %v", err)
	}

	for i := 0; i < 50; i++ {
		l := rand.Intn(length + 2000)
		o := rand.Intn(length + 2000)

		d := data
		if o < len(d) {
			d = d[o:]
		} else {
			o = len(d)
			d = d[:0]
		}

		if l > 0 && l < len(d) {
			d = d[:l]
		}

		buf := make([]byte, l)
		n, err := b.Load(handle, buf, int64(o))

		// if we requested data beyond the end of the file, ignore
		// ErrUnexpectedEOF error
		if l > len(d) && err == io.ErrUnexpectedEOF {
			err = nil
			buf = buf[:len(d)]
		}

		if err != nil {
			t.Errorf("Load(%d, %d): unexpected error: %v", len(buf), int64(o), err)
			continue
		}

		if n != len(buf) {
			t.Errorf("Load(%d, %d): wrong length returned, want %d, got %d",
				len(buf), int64(o), len(buf), n)
			continue
		}

		buf = buf[:n]
		if !bytes.Equal(buf, d) {
			t.Errorf("Load(%d, %d) returned wrong bytes", len(buf), int64(o))
			continue
		}
	}

	// load with a too-large buffer, this should return io.ErrUnexpectedEOF
	buf := make([]byte, length+100)
	n, err := b.Load(handle, buf, 0)
	if n != length {
		t.Errorf("wrong length for larger buffer returned, want %d, got %d", length, n)
	}

	if err != io.ErrUnexpectedEOF {
		t.Errorf("wrong error returned for larger buffer: want io.ErrUnexpectedEOF, got %#v", err)
	}

	OK(t, b.Remove(backend.Data, id.String()))
}
Example 19
func store(t testing.TB, b backend.Backend, tpe backend.Type, data []byte) {
	id := backend.Hash(data)
	err := b.Save(backend.Handle{Name: id.String(), Type: tpe}, data)
	OK(t, err)
}
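store computes the content hash and saves the data under that name in one step; combined with the in-memory backend it makes a compact test fixture. A sketch, reusing mem.New and Random as in Example 4 (testStore is a hypothetical test):

// testStore saves a few random blobs through the store helper, using the
// in-memory backend and the Random helper as in Example 4. Sketch only.
func testStore(t *testing.T) {
	b := mem.New()
	for i := 0; i < 5; i++ {
		store(t, b, backend.Data, Random(i, 1<<12))
	}
}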
Example 20
// ArchiveReader reads from the reader and archives the data. Returned is the
// resulting snapshot and its ID.
func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
	debug.Log("ArchiveReader", "start archiving %s", name)
	sn, err := NewSnapshot([]string{name})
	if err != nil {
		return nil, backend.ID{}, err
	}

	p.Start()
	defer p.Done()

	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)

	var ids backend.IDs
	var fileSize uint64

	for {
		chunk, err := chnker.Next(getBuf())
		if err == io.EOF {
			break
		}

		if err != nil {
			return nil, backend.ID{}, err
		}

		id := backend.Hash(chunk.Data)

		if !repo.Index().Has(id) {
			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
			if err != nil {
				return nil, backend.ID{}, err
			}
			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
		} else {
			debug.Log("ArchiveReader", "blob %v already saved in the repo\n", id.Str())
		}

		freeBuf(chunk.Data)

		ids = append(ids, id)

		p.Report(Stat{Bytes: uint64(chunk.Length)})
		fileSize += uint64(chunk.Length)
	}

	tree := &Tree{
		Nodes: []*Node{
			&Node{
				Name:       name,
				AccessTime: time.Now(),
				ModTime:    time.Now(),
				Type:       "file",
				Mode:       0644,
				Size:       fileSize,
				UID:        sn.UID,
				GID:        sn.GID,
				User:       sn.Username,
				Content:    ids,
			},
		},
	}

	treeID, err := saveTreeJSON(repo, tree)
	if err != nil {
		return nil, backend.ID{}, err
	}
	sn.Tree = &treeID
	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
	if err != nil {
		return nil, backend.ID{}, err
	}

	sn.id = &id
	debug.Log("ArchiveReader", "snapshot saved as %v", id.Str())

	err = repo.Flush()
	if err != nil {
		return nil, backend.ID{}, err
	}

	err = repo.SaveIndex()
	if err != nil {
		return nil, backend.ID{}, err
	}

	return sn, id, nil
}
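ArchiveReader accepts any io.Reader, so it can snapshot a pipe or standard input directly. A calling sketch (archiveStdin is hypothetical; the Progress value is assumed to come from elsewhere, as its constructor is not shown in these examples):

// archiveStdin creates a snapshot containing a single file fed from
// os.Stdin. Sketch only; p must be a Progress value usable by ArchiveReader.
func archiveStdin(repo *repository.Repository, p *Progress) (backend.ID, error) {
	_, id, err := ArchiveReader(repo, p, os.Stdin, "stdin")
	return id, err
}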
Example 21
// AddKey adds a new key to an already existing repository.
func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error) {
	// fill meta data about key
	newkey := &Key{
		Created: time.Now(),
		KDF:     "scrypt",
		N:       scryptN,
		R:       scryptR,
		P:       scryptP,
	}

	hn, err := os.Hostname()
	if err == nil {
		newkey.Hostname = hn
	}

	usr, err := user.Current()
	if err == nil {
		newkey.Username = usr.Username
	}

	// generate random salt
	newkey.Salt = make([]byte, scryptSaltsize)
	n, err := rand.Read(newkey.Salt)
	if n != scryptSaltsize || err != nil {
		panic("unable to read enough random bytes for salt")
	}

	// call KDF to derive user key
	newkey.user, err = crypto.KDF(newkey.N, newkey.R, newkey.P, newkey.Salt, password)
	if err != nil {
		return nil, err
	}

	if template == nil {
		// generate new random master keys
		newkey.master = crypto.NewRandomKey()
	} else {
		// copy master keys from old key
		newkey.master = template
	}

	// encrypt master keys (as json) with user key
	buf, err := json.Marshal(newkey.master)
	if err != nil {
		return nil, err
	}

	newkey.Data, err = crypto.Encrypt(newkey.user, nil, buf)
	if err != nil {
		return nil, err
	}

	// dump as json
	buf, err = json.Marshal(newkey)
	if err != nil {
		return nil, err
	}

	// store in repository and return
	h := backend.Handle{
		Type: backend.Key,
		Name: backend.Hash(buf).String(),
	}

	err = s.be.Save(h, buf)
	if err != nil {
		return nil, err
	}

	newkey.name = h.Name

	return newkey, nil
}
Example 22
func (cmd CmdRebuildIndex) RebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start")

	done := make(chan struct{})
	defer close(done)

	indexIDs := backend.NewIDSet()
	for id := range cmd.repo.List(backend.Index, done) {
		indexIDs.Insert(id)
	}

	cmd.global.Printf("rebuilding index from %d indexes\n", len(indexIDs))

	debug.Log("RebuildIndex.RebuildIndex", "found %v indexes", len(indexIDs))

	combinedIndex := repository.NewIndex()
	packsDone := backend.NewIDSet()

	type Blob struct {
		id  backend.ID
		tpe pack.BlobType
	}
	blobsDone := make(map[Blob]struct{})

	i := 0
	for indexID := range indexIDs {
		cmd.global.Printf("  loading index %v\n", i)

		debug.Log("RebuildIndex.RebuildIndex", "load index %v", indexID.Str())
		idx, err := repository.LoadIndex(cmd.repo, indexID.String())
		if err != nil {
			return err
		}

		debug.Log("RebuildIndex.RebuildIndex", "adding blobs from index %v", indexID.Str())

		for packedBlob := range idx.Each(done) {
			packsDone.Insert(packedBlob.PackID)
			b := Blob{
				id:  packedBlob.ID,
				tpe: packedBlob.Type,
			}
			if _, ok := blobsDone[b]; ok {
				continue
			}

			blobsDone[b] = struct{}{}
			combinedIndex.Store(packedBlob)
		}

		combinedIndex.AddToSupersedes(indexID)

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}

		i++
	}

	var err error
	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("removing %d old indexes\n", len(indexIDs))
	for id := range indexIDs {
		debug.Log("RebuildIndex.RebuildIndex", "remove index %v", id.Str())

		err := cmd.repo.Backend().Remove(backend.Index, id.String())
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error removing index %v: %v", id.Str(), err)
			return err
		}
	}

	cmd.global.Printf("checking for additional packs\n")
	newPacks := 0
	var buf []byte
	for packID := range cmd.repo.List(backend.Data, done) {
		if packsDone.Has(packID) {
			continue
		}

		debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
		newPacks++

		var err error

		h := backend.Handle{Type: backend.Data, Name: packID.String()}
		buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while loading pack %v", packID.Str())
			return fmt.Errorf("error while loading pack %v: %v", packID.Str(), err)
		}

		hash := backend.Hash(buf)
		if !hash.Equal(packID) {
			debug.Log("RebuildIndex.RebuildIndex", "Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
			return fmt.Errorf("Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
		}

		up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf))
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
			return err
		}

		for _, blob := range up.Entries {
			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
			combinedIndex.Store(repository.PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: packID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}
	}

	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("added %d packs to the index\n", newPacks)

	debug.Log("RebuildIndex.RebuildIndex", "done")
	return nil
}