// verifyBlobs checks that the pack read from rd has the expected size and
// that every blob can be read back and matches the original data in bufs.
func verifyBlobs(t testing.TB, bufs []Buf, k *crypto.Key, rd io.ReadSeeker, packSize uint) {
	written := 0
	for _, l := range lengths {
		written += l
	}
	// header length
	written += binary.Size(uint32(0))
	// header
	written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
	// header crypto
	written += crypto.Extension

	// check length
	Equals(t, uint(written), packSize)

	// read and parse it again
	np, err := pack.NewUnpacker(k, rd)
	OK(t, err)
	Equals(t, len(np.Entries), len(bufs))

	for i, b := range bufs {
		e := np.Entries[i]
		Equals(t, b.id, e.ID)

		brd, err := e.GetReader(rd)
		OK(t, err)
		data, err := ioutil.ReadAll(brd)
		OK(t, err)

		Assert(t, bytes.Equal(b.data, data),
			"data for blob %v doesn't match", i)
	}
}
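// The size arithmetic above mirrors the on-disk pack layout: the raw blob
// data, a uint32 header-length field, one fixed-size header entry per blob
// (type + length + ID), and the crypto.Extension overhead added by
// encrypting the header. A minimal sketch pulling that arithmetic into a
// helper; expectedPackSize is hypothetical (not part of the original code)
// but uses only the constants referenced above. The same arithmetic appears
// again in TestCreatePack below.
func expectedPackSize(blobLengths []int) uint {
	size := 0
	for _, l := range blobLengths {
		size += l // raw blob data
	}
	size += binary.Size(uint32(0)) // header length field
	// one header entry per blob: type, length, ID
	size += len(blobLengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
	size += crypto.Extension // overhead of the encrypted header
	return uint(size)
}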
// printPacks lists the contents of all data packs in the repository as JSON,
// parsing the pack headers concurrently with a worker pool.
func printPacks(repo *repository.Repository, wr io.Writer) error {
	done := make(chan struct{})
	defer close(done)

	// worker function: parse the header of a single pack file
	f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
		name := job.Data.(string)

		h := backend.Handle{Type: backend.Data, Name: name}
		rd := backend.NewReadSeeker(repo.Backend(), h)

		unpacker, err := pack.NewUnpacker(repo.Key(), rd)
		if err != nil {
			return nil, err
		}

		return unpacker.Entries, nil
	}

	jobCh := make(chan worker.Job)
	resCh := make(chan worker.Job)
	wp := worker.New(dumpPackWorkers, f, jobCh, resCh)

	// feed the pack file names to the workers
	go func() {
		for name := range repo.Backend().List(backend.Data, done) {
			jobCh <- worker.Job{Data: name}
		}
		close(jobCh)
	}()

	for job := range resCh {
		name := job.Data.(string)

		if job.Error != nil {
			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", name, job.Error)
			continue
		}

		entries := job.Result.([]pack.Blob)
		p := Pack{
			Name:  name,
			Blobs: make([]Blob, len(entries)),
		}
		for i, blob := range entries {
			p.Blobs[i] = Blob{
				Type:   blob.Type,
				Length: blob.Length,
				ID:     blob.ID,
				Offset: blob.Offset,
			}
		}

		prettyPrintJSON(wr, p)
	}

	wp.Wait()

	return nil
}
// ListPack returns the list of blobs saved in the pack id.
func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, error) {
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	rd := backend.NewReadSeeker(r.Backend(), h)

	unpacker, err := pack.NewUnpacker(r.Key(), rd)
	if err != nil {
		return nil, err
	}

	return unpacker.Entries, nil
}
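// A minimal usage sketch for ListPack, assuming a loaded Repository and a
// known pack ID. listPackExample is hypothetical; it only calls ListPack as
// defined above and prints the fields of each header entry.
func listPackExample(r *Repository, id backend.ID) error {
	blobs, err := r.ListPack(id)
	if err != nil {
		return err
	}
	for _, b := range blobs {
		fmt.Printf("%v %v: offset %d, length %d\n", b.Type, b.ID.Str(), b.Offset, b.Length)
	}
	return nil
}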
// checkPack reads a pack and checks the integrity of all blobs.
func checkPack(r *repository.Repository, id backend.ID) error {
	debug.Log("Checker.checkPack", "checking pack %v", id.Str())
	h := backend.Handle{Type: backend.Data, Name: id.String()}
	buf, err := backend.LoadAll(r.Backend(), h, nil)
	if err != nil {
		return err
	}

	hash := backend.Hash(buf)
	if !hash.Equal(id) {
		debug.Log("Checker.checkPack", "Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
		return fmt.Errorf("Pack ID does not match, want %v, got %v", id.Str(), hash.Str())
	}

	unpacker, err := pack.NewUnpacker(r.Key(), bytes.NewReader(buf))
	if err != nil {
		return err
	}

	var errs []error
	for i, blob := range unpacker.Entries {
		debug.Log("Checker.checkPack", "  check blob %d: %v", i, blob.ID.Str())

		plainBuf := make([]byte, blob.Length)
		plainBuf, err = crypto.Decrypt(r.Key(), plainBuf, buf[blob.Offset:blob.Offset+blob.Length])
		if err != nil {
			debug.Log("Checker.checkPack", "  error decrypting blob %v: %v", blob.ID.Str(), err)
			errs = append(errs, fmt.Errorf("blob %v: %v", i, err))
			continue
		}

		hash := backend.Hash(plainBuf)
		if !hash.Equal(blob.ID) {
			debug.Log("Checker.checkPack", "  Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str())
			errs = append(errs, fmt.Errorf("Blob ID does not match, want %v, got %v", blob.ID.Str(), hash.Str()))
			continue
		}
	}

	if len(errs) > 0 {
		return fmt.Errorf("pack %v contains %v errors: %v", id.Str(), len(errs), errs)
	}

	return nil
}
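// checkPack verifies a single pack; a plausible driver runs it over every
// data pack and collects the errors. checkAllPacks is hypothetical; it
// assumes the same List channel API used by RebuildIndex below.
func checkAllPacks(r *repository.Repository) []error {
	done := make(chan struct{})
	defer close(done)

	var errs []error
	for id := range r.List(backend.Data, done) {
		if err := checkPack(r, id); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}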
// TestCreatePack writes random blobs into a pack, verifies the resulting
// pack size, and reads all blobs back to compare them with the originals.
func TestCreatePack(t *testing.T) {
	type Buf struct {
		data []byte
		id   backend.ID
	}

	bufs := []Buf{}

	for _, l := range lengths {
		b := make([]byte, l)
		_, err := io.ReadFull(rand.Reader, b)
		OK(t, err)
		h := sha256.Sum256(b)
		bufs = append(bufs, Buf{data: b, id: h})
	}

	// create random keys
	k := crypto.NewRandomKey()

	// pack blobs
	p := pack.NewPacker(k, nil)
	for _, b := range bufs {
		p.Add(pack.Tree, b.id, bytes.NewReader(b.data))
	}

	packData, err := p.Finalize()
	OK(t, err)

	written := 0
	for _, l := range lengths {
		written += l
	}
	// header length
	written += binary.Size(uint32(0))
	// header
	written += len(lengths) * (binary.Size(pack.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
	// header crypto
	written += crypto.Extension

	// check length
	Equals(t, written, len(packData))
	Equals(t, uint(written), p.Size())

	// read and parse it again
	rd := bytes.NewReader(packData)
	np, err := pack.NewUnpacker(k, rd)
	OK(t, err)
	Equals(t, len(np.Entries), len(bufs))

	for i, b := range bufs {
		e := np.Entries[i]
		Equals(t, b.id, e.ID)

		brd, err := e.GetReader(rd)
		OK(t, err)
		data, err := ioutil.ReadAll(brd)
		OK(t, err)

		Assert(t, bytes.Equal(b.data, data),
			"data for blob %v doesn't match", i)
	}
}
// RebuildIndex loads all existing indexes, merges their entries into a
// combined index (deduplicating blobs), scans for packs not referenced by
// any index, and finally removes the superseded index files.
func (cmd CmdRebuildIndex) RebuildIndex() error {
	debug.Log("RebuildIndex.RebuildIndex", "start")

	done := make(chan struct{})
	defer close(done)

	indexIDs := backend.NewIDSet()
	for id := range cmd.repo.List(backend.Index, done) {
		indexIDs.Insert(id)
	}

	cmd.global.Printf("rebuilding index from %d indexes\n", len(indexIDs))

	debug.Log("RebuildIndex.RebuildIndex", "found %v indexes", len(indexIDs))

	combinedIndex := repository.NewIndex()
	packsDone := backend.NewIDSet()

	type Blob struct {
		id  backend.ID
		tpe pack.BlobType
	}
	blobsDone := make(map[Blob]struct{})

	i := 0
	for indexID := range indexIDs {
		cmd.global.Printf("  loading index %v\n", i)

		debug.Log("RebuildIndex.RebuildIndex", "load index %v", indexID.Str())
		idx, err := repository.LoadIndex(cmd.repo, indexID.String())
		if err != nil {
			return err
		}

		debug.Log("RebuildIndex.RebuildIndex", "adding blobs from index %v", indexID.Str())

		for packedBlob := range idx.Each(done) {
			packsDone.Insert(packedBlob.PackID)

			b := Blob{
				id:  packedBlob.ID,
				tpe: packedBlob.Type,
			}

			if _, ok := blobsDone[b]; ok {
				continue
			}

			blobsDone[b] = struct{}{}
			combinedIndex.Store(packedBlob)
		}

		combinedIndex.AddToSupersedes(indexID)

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}

		i++
	}

	var err error
	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("removing %d old indexes\n", len(indexIDs))
	for id := range indexIDs {
		debug.Log("RebuildIndex.RebuildIndex", "remove index %v", id.Str())

		err := cmd.repo.Backend().Remove(backend.Index, id.String())
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error removing index %v: %v", id.Str(), err)
			return err
		}
	}

	cmd.global.Printf("checking for additional packs\n")
	newPacks := 0
	var buf []byte
	for packID := range cmd.repo.List(backend.Data, done) {
		if packsDone.Has(packID) {
			continue
		}

		debug.Log("RebuildIndex.RebuildIndex", "pack %v not indexed", packID.Str())
		newPacks++

		var err error

		h := backend.Handle{Type: backend.Data, Name: packID.String()}
		buf, err = backend.LoadAll(cmd.repo.Backend(), h, buf)
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while loading pack %v", packID.Str())
			return fmt.Errorf("error while loading pack %v: %v", packID.Str(), err)
		}

		hash := backend.Hash(buf)
		if !hash.Equal(packID) {
			debug.Log("RebuildIndex.RebuildIndex", "Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
			return fmt.Errorf("Pack ID does not match, want %v, got %v", packID.Str(), hash.Str())
		}

		up, err := pack.NewUnpacker(cmd.repo.Key(), bytes.NewReader(buf))
		if err != nil {
			debug.Log("RebuildIndex.RebuildIndex", "error while unpacking pack %v", packID.Str())
			return err
		}

		for _, blob := range up.Entries {
			debug.Log("RebuildIndex.RebuildIndex", "pack %v: blob %v", packID.Str(), blob)
			combinedIndex.Store(repository.PackedBlob{
				Type:   blob.Type,
				ID:     blob.ID,
				PackID: packID,
				Offset: blob.Offset,
				Length: blob.Length,
			})
		}

		if repository.IndexFull(combinedIndex) {
			combinedIndex, err = cmd.storeIndex(combinedIndex)
			if err != nil {
				return err
			}
		}
	}

	if combinedIndex.Length() > 0 {
		combinedIndex, err = cmd.storeIndex(combinedIndex)
		if err != nil {
			return err
		}
	}

	cmd.global.Printf("added %d packs to the index\n", newPacks)

	debug.Log("RebuildIndex.RebuildIndex", "done")
	return nil
}
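// The storeIndex helper is called above but not shown. A plausible sketch of
// its shape: persist the accumulated index and hand back a fresh, empty one
// to continue filling. This is a hypothetical reconstruction; it assumes a
// repository.SaveIndex function that stores an index and returns its ID.
func (cmd CmdRebuildIndex) storeIndex(index *repository.Index) (*repository.Index, error) {
	cmd.global.Printf("  saving new index\n")

	id, err := repository.SaveIndex(cmd.repo, index) // assumed API
	if err != nil {
		return nil, err
	}

	debug.Log("RebuildIndex.storeIndex", "index saved as %v", id.Str())
	return repository.NewIndex(), nil
}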