Example 1
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	rd, err := r.Backend().Get(backend.Data, id.String())
	if err != nil {
		return err
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return err
	}

	err = rd.Close()
	if err != nil {
		return err
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
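
A minimal driver sketch, not part of the original source: it shows how checkPack could be run over every data pack in a repository, assuming the List method used by the later examples is available on *repository.Repository. It stops at the first failing pack; collecting all errors would follow the same pattern as checkPack itself.

// checkAllPacks is a hypothetical helper: it lists all data packs in the
// repository and runs checkPack on each, returning the first error found.
func checkAllPacks(repo *repository.Repository) error {
	done := make(chan struct{})
	defer close(done)

	for id := range repo.List(backend.Data, done) {
		if err := checkPack(repo, id); err != nil {
			return err
		}
	}

	return nil
}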
Example 2
func (cmd CmdRebuildIndex) RebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start")

	done := make(chan struct{})
	defer close(done)

	indexIDs := backend.NewIDSet()
	for id := range cmd.repo.List(backend.Index, done) {
		indexIDs.Insert(id)
	}

	cmd.global.Printf("rebuilding index from %d indexes\n", len(indexIDs))

	debug.Log("RebuildIndex.RebuildIndex", "found %v indexes", len(indexIDs))

	combinedIndex := repository.NewIndex()
	packsDone := backend.NewIDSet()

	type Blob struct {
		id  backend.ID
		tpe pack.BlobType
	}
	blobsDone := make(map[Blob]struct{})

	i := 0
	for indexID := range indexIDs {
		cmd.global.Printf("  loading index %v\n", i)

		debug.Log("RebuildIndex.RebuildIndex", "load index %v", indexID.Str())
		idx, err := repository.LoadIndex(cmd.repo, indexID.String())
		if err != nil {
			return err
		}

		debug.Log("RebuildIndex.RebuildIndex", "adding blobs from index %v", indexID.Str())

		for packedBlob := range idx.Each(done) {
			packsDone.Insert(packedBlob.PackID)
			b := Blob{
				id:  packedBlob.ID,
				tpe: packedBlob.Type,
			}
			if _, ok := blobsDone[b]; ok {
				continue
			}

			blobsDone[b] = struct{}{}
			combinedIndex.Store(packedBlob)
		}

		combinedIndex.AddToSupersedes(indexID)

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}

		i++
	}

	var err error
	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("removing %d old indexes\n", len(indexIDs))
	for id := range indexIDs {
		debug.Log("RebuildIndex.RebuildIndex", "remove index %v", id.Str())

		err := cmd.repo.Backend().Remove(backend.Index, id.String())
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error removing index %v: %v", id.Str(), err)
			return err
		}
	}

	cmd.global.Printf("checking for additional packs\n")
	newPacks := 0
	for packID := range cmd.repo.List(backend.Data, done) {
		if packsDone.Has(packID) {
			continue
		}

		debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
		newPacks++

		rd, err := cmd.repo.Backend().GetReader(backend.Data, packID.String(), 0, 0)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "GetReader returned error: %v", err)
			return err
		}

		var readSeeker io.ReadSeeker
		if r, ok := rd.(io.ReadSeeker); ok {
			debug.Log("RebuildIndex.RebuildIndex", "reader is seekable")
			readSeeker = r
		} else {
			debug.Log("RebuildIndex.RebuildIndex", "reader is not seekable, loading contents to ram")
			buf, err := ioutil.ReadAll(rd)
			if err != nil {
				return err
			}

			readSeeker = bytes.NewReader(buf)
		}

		up, err := pack.NewUnpacker(cmd.repo.Key(), readSeeker)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
			return err
		}

		for _, blob := range up.Entries {
			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
			combinedIndex.Store(repository.PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: packID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		if err = rd.Close(); err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error closing reader for pack %v: %v", packID.Str(), err)
		}

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}
	}

	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("added %d packs to the index\n", newPacks)

	debug.Log("RebuildIndex.RebuildIndex", "done")
	return nil
}
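
The loop above has to hand pack.NewUnpacker an io.ReadSeeker, so it falls back to buffering the whole pack in memory when the backend reader cannot seek. A standalone sketch of that fallback, not taken from the original source:

// toReadSeeker is a hypothetical helper: it returns rd unchanged if it
// already supports seeking, and otherwise reads the whole stream into memory
// and wraps it in a bytes.Reader.
func toReadSeeker(rd io.Reader) (io.ReadSeeker, error) {
	if rs, ok := rd.(io.ReadSeeker); ok {
		return rs, nil
	}

	buf, err := ioutil.ReadAll(rd)
	if err != nil {
		return nil, err
	}

	return bytes.NewReader(buf), nil
}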
Example 3
func TestCreatePack(t *testing.T) {
	type Buf struct {
		data []byte
		id   backend.ID
	}

	bufs := []Buf{}

	// lengths is a package-level test fixture (a slice of blob sizes) defined elsewhere in this test file.
	for _, l := range lengths {
		b := make([]byte, l)
		_, err := io.ReadFull(rand.Reader, b)
		OK(t, err)
		h := sha256.Sum256(b)
		bufs = append(bufs, Buf{data: b, id: h})
	}

	file := bytes.NewBuffer(nil)

	// create a random key
	k := crypto.NewRandomKey()

	// pack blobs
	p := pack.NewPacker(k, file)
	for _, b := range bufs {
		p.Add(pack.Tree, b.id, bytes.NewReader(b.data))
	}

	// write file
	n, err := p.Finalize()
	OK(t, err)

	written := 0
	// data
	for _, l := range lengths {
		written += l
	}
	// header length
	written += binary.Size(uint32(0))
	// header
	written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
	// header crypto
	written += crypto.Extension

	// check length
	Equals(t, uint(written), n)
	Equals(t, uint(written), p.Size())

	// read and parse it again
	rd := bytes.NewReader(file.Bytes())
	np, err := pack.NewUnpacker(k, nil, rd)
	OK(t, err)
	Equals(t, len(np.Entries), len(bufs))

	for i, b := range bufs {
		e := np.Entries[i]
		Equals(t, b.id, e.ID)

		brd, err := e.GetReader(rd)
		OK(t, err)
		data, err := ioutil.ReadAll(brd)
		OK(t, err)

		Assert(t, bytes.Equal(b.data, data),
			"data for blob %v doesn't match", i)
	}
}
Example 4
func (cmd CmdRebuildIndex) RebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start")

	done := make(chan struct{})
	defer close(done)

	indexIDs := backend.NewIDSet()
	for id := range cmd.repo.List(backend.Index, done) {
		indexIDs.Insert(id)
	}

	cmd.global.Printf("rebuilding index from %d indexes\n", len(indexIDs))

	debug.Log("RebuildIndex.RebuildIndex", "found %v indexes", len(indexIDs))

	combinedIndex := repository.NewIndex()
	packsDone := backend.NewIDSet()

	type Blob struct {
		id  backend.ID
		tpe pack.BlobType
	}
	blobsDone := make(map[Blob]struct{})

	i := 0
	for indexID := range indexIDs {
		cmd.global.Printf("  loading index %v\n", i)

		debug.Log("RebuildIndex.RebuildIndex", "load index %v", indexID.Str())
		idx, err := repository.LoadIndex(cmd.repo, indexID.String())
		if err != nil {
			return err
		}

		debug.Log("RebuildIndex.RebuildIndex", "adding blobs from index %v", indexID.Str())

		for packedBlob := range idx.Each(done) {
			packsDone.Insert(packedBlob.PackID)
			b := Blob{
				id:  packedBlob.ID,
				tpe: packedBlob.Type,
			}
			if _, ok := blobsDone[b]; ok {
				continue
			}

			blobsDone[b] = struct{}{}
			combinedIndex.Store(packedBlob)
		}

		combinedIndex.AddToSupersedes(indexID)

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}

		i++
	}

	var err error
	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("removing %d old indexes\n", len(indexIDs))
	for id := range indexIDs {
		debug.Log("RebuildIndex.RebuildIndex", "remove index %v", id.Str())

		err := cmd.repo.Backend().Remove(backend.Index, id.String())
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error removing index %v: %v", id.Str(), err)
			return err
		}
	}

	cmd.global.Printf("checking for additional packs\n")
	newPacks := 0
	var buf []byte
	for packID := range cmd.repo.List(backend.Data, done) {
		if packsDone.Has(packID) {
			continue
		}

		debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
		newPacks++

		var err error

		h := backend.Handle{Type: backend.Data, Name: packID.String()}
		buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while loading pack %v", packID.Str())
			return fmt.Errorf("error while loading pack %v: %v", packID.Str(), err)
		}

		hash := backend.Hash(buf)
		if !hash.Equal(packID) {
			debug.Log("RebuildIndex.RebuildIndex", "Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
			return fmt.Errorf("Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
		}

		up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf))
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
			return err
		}

		for _, blob := range up.Entries {
			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
			combinedIndex.Store(repository.PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: packID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}
	}

	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("added %d packs to the index\n", newPacks)

	debug.Log("RebuildIndex.RebuildIndex", "done")
	return nil
}
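
Example 4 differs from Example 2 mainly in how unindexed packs are read: instead of GetReader plus an optional in-memory copy, it loads the complete pack with backend.LoadAll and verifies that the SHA-256 of the content matches the pack ID before unpacking. A minimal sketch of that load-and-verify step, not from the original source and assuming backend.Backend is the interface type returned by cmd.repo.Backend():

// loadVerifiedPack is a hypothetical helper: it loads a whole pack file and
// checks that its content hash equals the pack ID before returning the data.
func loadVerifiedPack(be backend.Backend, id backend.ID, buf []byte) ([]byte, error) {
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	buf, err := backend.LoadAll(be, h, buf)
	if err != nil {
		return nil, err
	}

	if hash := backend.Hash(buf); !hash.Equal(id) {
		return nil, fmt.Errorf("pack %v: content hash %v does not match ID", id.Str(), hash.Str())
	}

	return buf, nil
}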