func TestFilesInParallel(t *testing.T) {
	f := func(id string, done <-chan struct{}) error {
		time.Sleep(1 * time.Millisecond)
		return nil
	}

	for n := uint(1); n < 5; n++ {
		err := repository.FilesInParallel(lister, backend.Data, n*100, f)
		OK(t, err)
	}
}
func TestFilesInParallelWithError(t *testing.T) {
	f := func(id string, done <-chan struct{}) error {
		time.Sleep(1 * time.Millisecond)

		// Fail for a small fraction of files so that error
		// propagation through FilesInParallel is exercised.
		if rand.Float32() < 0.01 {
			return errTest
		}

		return nil
	}

	for n := uint(1); n < 5; n++ {
		err := repository.FilesInParallel(lister, backend.Data, n*100, f)
		Equals(t, errTest, err)
	}
}
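// The two tests above exercise repository.FilesInParallel: it lists all files
// of the given backend type, calls the worker for each file ID with the given
// parallelism, and returns the first worker error. A minimal sketch of direct
// usage outside the tests, based only on the signature inferred above; the
// parameter `be` and the worker body are illustrative, not part of the
// original code. Passing a backend.Backend matches how the checker below
// calls it with repo.Backend().
func collectDataFileIDs(be backend.Backend) ([]string, error) {
	var (
		mu  sync.Mutex
		ids []string
	)

	worker := func(id string, done <-chan struct{}) error {
		mu.Lock()
		ids = append(ids, id)
		mu.Unlock()
		return nil
	}

	// Process all data files with up to 20 concurrent workers.
	err := repository.FilesInParallel(be, backend.Data, 20, worker)
	return ids, err
}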
// loadSnapshotTreeIDs loads all snapshots from the backend and returns their
// tree IDs. Errors for individual snapshots are collected and returned
// alongside the IDs instead of aborting the whole run.
func loadSnapshotTreeIDs(repo *repository.Repository) (backend.IDs, []error) {
	var trees struct {
		IDs backend.IDs
		sync.Mutex
	}

	var errs struct {
		errs []error
		sync.Mutex
	}

	snapshotWorker := func(strID string, done <-chan struct{}) error {
		id, err := backend.ParseID(strID)
		if err != nil {
			return err
		}

		debug.Log("Checker.Snapshots", "load snapshot %v", id.Str())

		treeID, err := loadTreeFromSnapshot(repo, id)
		if err != nil {
			errs.Lock()
			errs.errs = append(errs.errs, err)
			errs.Unlock()
			return nil
		}

		debug.Log("Checker.Snapshots", "snapshot %v has tree %v", id.Str(), treeID.Str())

		trees.Lock()
		trees.IDs = append(trees.IDs, treeID)
		trees.Unlock()

		return nil
	}

	err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, defaultParallelism, snapshotWorker)
	if err != nil {
		errs.errs = append(errs.errs, err)
	}

	return trees.IDs, errs.errs
}
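// loadTreeFromSnapshot is called above but not shown in this excerpt. A
// plausible sketch of what it looks like, assuming a restic.LoadSnapshot
// helper that returns a *Snapshot whose Tree field (*backend.ID) holds the
// root tree ID; treat both the helper and the field as assumptions here:
func loadTreeFromSnapshot(repo *repository.Repository, id backend.ID) (backend.ID, error) {
	sn, err := restic.LoadSnapshot(repo, id)
	if err != nil {
		return backend.ID{}, err
	}

	if sn.Tree == nil {
		return backend.ID{}, fmt.Errorf("snapshot %v has no tree", id.Str())
	}

	return *sn.Tree, nil
}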
// LoadIndex loads all index files. Non-fatal problems are returned as hints,
// fatal problems as errs.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
	debug.Log("LoadIndex", "Start")

	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeIndex)
		if err == repository.ErrOldIndexFormat {
			debug.Log("LoadIndex", "index %v has old format", id.Str())
			hints = append(hints, ErrOldIndexFormat{id})

			idx, err = repository.LoadIndexWithDecoder(c.repo, id, repository.DecodeOldIndex)
		}

		if err != nil {
			return err
		}

		select {
		case indexCh <- indexRes{Index: idx, ID: id.String()}:
		case <-done:
		}

		return nil
	}

	var perr error
	go func() {
		defer close(indexCh)

		debug.Log("LoadIndex", "start loading indexes in parallel")
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism,
			repository.ParallelWorkFuncParseID(worker))
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	packToIndex := make(map[backend.ID]backend.IDSet)

	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)

		idxID, err := backend.ParseID(res.ID)
		if err != nil {
			errs = append(errs, fmt.Errorf("unable to parse as index ID: %v", res.ID))
			continue
		}

		c.indexes[idxID] = res.Index
		c.masterIndex.Insert(res.Index)

		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs.Insert(blob.PackID)
			c.blobs.Insert(blob.ID)
			c.blobRefs.M[blob.ID] = 0
			cnt++

			// Record which indexes reference each pack so duplicates
			// can be reported below.
			if _, ok := packToIndex[blob.PackID]; !ok {
				packToIndex[blob.PackID] = backend.NewIDSet()
			}
			packToIndex[blob.PackID].Insert(idxID)
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	// Check perr only after indexCh has been drained: the goroutine closes
	// the channel after FilesInParallel returns, so perr is safe to read
	// here. Checking it right after starting the goroutine would race and
	// almost always observe nil.
	if perr != nil {
		errs = append(errs, perr)
		return hints, errs
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	debug.Log("LoadIndex", "checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("LoadIndex", " check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}

	c.repo.SetIndex(c.masterIndex)

	return hints, errs
}
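// A minimal sketch of how a caller might drive LoadIndex and interpret its
// two return values: hints are non-fatal warnings (e.g. old index format,
// duplicate packs), errs abort the check. checker.New is assumed to be the
// Checker constructor; treat the exact name as an assumption for this excerpt:
func loadIndexForCheck(repo *repository.Repository) (*checker.Checker, error) {
	chkr := checker.New(repo)

	hints, errs := chkr.LoadIndex()
	for _, hint := range hints {
		fmt.Fprintf(os.Stderr, "warning: %v\n", hint)
	}

	if len(errs) > 0 {
		// Report the first fatal error; the rest typically stem from
		// the same broken index files.
		return nil, errs[0]
	}

	return chkr, nil
}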