func TestConvertIndex(t *testing.T) { WithTestEnvironment(t, oldIndexTestRepo, func(repodir string) { repo := OpenLocalRepo(t, repodir) old := make(map[backend.ID]*repository.Index) for id := range repo.List(backend.Index, nil) { idx, err := repository.LoadIndex(repo, id.String()) OK(t, err) old[id] = idx } OK(t, repository.ConvertIndexes(repo)) for id := range repo.List(backend.Index, nil) { idx, err := repository.LoadIndexWithDecoder(repo, id.String(), repository.DecodeIndex) OK(t, err) Assert(t, len(idx.Supersedes()) == 1, "Expected index %v to supersed exactly one index, got %v", id, idx.Supersedes()) oldIndexID := idx.Supersedes()[0] oldIndex, ok := old[oldIndexID] Assert(t, ok, "Index %v superseds %v, but that wasn't found in the old index map", id.Str(), oldIndexID.Str()) Assert(t, idx.Count(pack.Data) == oldIndex.Count(pack.Data), "Index %v count blobs %v: %v != %v", id.Str(), pack.Data, idx.Count(pack.Data), oldIndex.Count(pack.Data)) Assert(t, idx.Count(pack.Tree) == oldIndex.Count(pack.Tree), "Index %v count blobs %v: %v != %v", id.Str(), pack.Tree, idx.Count(pack.Tree), oldIndex.Count(pack.Tree)) for packedBlob := range idx.Each(nil) { packID, tpe, offset, length, err := oldIndex.Lookup(packedBlob.ID) OK(t, err) Assert(t, packID == packedBlob.PackID, "Check blob %v: pack ID %v != %v", packedBlob.ID, packID, packedBlob.PackID) Assert(t, tpe == packedBlob.Type, "Check blob %v: Type %v != %v", packedBlob.ID, tpe, packedBlob.Type) Assert(t, offset == packedBlob.Offset, "Check blob %v: Type %v != %v", packedBlob.ID, offset, packedBlob.Offset) Assert(t, length == packedBlob.Length, "Check blob %v: Type %v != %v", packedBlob.ID, length, packedBlob.Length) } } }) }
// LoadIndex loads all index files of the repository into the checker. Index
// files in the old format are re-decoded with the old decoder and reported as
// a hint (ErrOldIndexFormat). It returns non-fatal hints and fatal errors
// separately; on success the assembled master index is installed on c.repo.
func (c *Checker) LoadIndex() (hints []error, errs []error) {
	debug.Log("LoadIndex", "Start")

	// indexRes pairs a decoded index with the string form of the file ID it
	// was loaded from, so both travel over one channel.
	type indexRes struct {
		Index *repository.Index
		ID    string
	}

	indexCh := make(chan indexRes)

	// worker decodes a single index file. If the file is in the old format,
	// that is recorded as a hint and the old decoder is tried instead.
	// NOTE(review): workers append to the shared `hints` slice; safe only if
	// FilesInParallel serializes these calls — confirm.
	worker := func(id backend.ID, done <-chan struct{}) error {
		debug.Log("LoadIndex", "worker got index %v", id)
		idx, err := repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeIndex)
		if err == repository.ErrOldIndexFormat {
			debug.Log("LoadIndex", "index %v has old format", id.Str())
			hints = append(hints, ErrOldIndexFormat{id})
			idx, err = repository.LoadIndexWithDecoder(c.repo, id.String(), repository.DecodeOldIndex)
		}
		if err != nil {
			return err
		}
		// Deliver the result, or abandon it if the receiver has gone away.
		select {
		case indexCh <- indexRes{Index: idx, ID: id.String()}:
		case <-done:
		}
		return nil
	}

	// Run the workers in the background; closing indexCh ends the receive
	// loop below once all files have been processed.
	var perr error
	go func() {
		defer close(indexCh)
		debug.Log("LoadIndex", "start loading indexes in parallel")
		perr = repository.FilesInParallel(c.repo.Backend(), backend.Index, defaultParallelism, repository.ParallelWorkFuncParseID(worker))
		debug.Log("LoadIndex", "loading indexes finished, error: %v", perr)
	}()

	done := make(chan struct{})
	defer close(done)

	// NOTE(review): perr is written by the goroutine above and read here
	// without synchronization; at this point the goroutine has typically not
	// finished, so this check almost always sees nil and the final value of
	// perr is never surfaced in errs — looks like a race/logic gap, confirm.
	if perr != nil {
		errs = append(errs, perr)
		return hints, errs
	}

	// packToIndex maps each pack ID to the set of index IDs that mention it;
	// used afterwards to flag packs listed in more than one index.
	packToIndex := make(map[backend.ID]backend.IDSet)

	for res := range indexCh {
		debug.Log("LoadIndex", "process index %v", res.ID)
		idxID, err := backend.ParseID(res.ID)
		if err != nil {
			errs = append(errs, fmt.Errorf("unable to parse as index ID: %v", res.ID))
			continue
		}

		c.indexes[idxID] = res.Index
		c.masterIndex.Insert(res.Index)

		// Register every blob and its pack with the checker's bookkeeping
		// maps, and record which index referenced the pack.
		debug.Log("LoadIndex", "process blobs")
		cnt := 0
		for blob := range res.Index.Each(done) {
			c.packs[blob.PackID] = struct{}{}
			c.blobs[blob.ID] = struct{}{}
			c.blobRefs.M[blob.ID] = 0
			cnt++

			if _, ok := packToIndex[blob.PackID]; !ok {
				packToIndex[blob.PackID] = backend.NewIDSet()
			}
			packToIndex[blob.PackID].Insert(idxID)
		}

		debug.Log("LoadIndex", "%d blobs processed", cnt)
	}

	debug.Log("LoadIndex", "done, error %v", perr)

	// A pack contained in more than one index is not fatal, but worth a hint.
	debug.Log("LoadIndex", "checking for duplicate packs")
	for packID := range c.packs {
		debug.Log("LoadIndex", " check pack %v: contained in %d indexes", packID.Str(), len(packToIndex[packID]))
		if len(packToIndex[packID]) > 1 {
			hints = append(hints, ErrDuplicatePacks{
				PackID:  packID,
				Indexes: packToIndex[packID],
			})
		}
	}

	c.repo.SetIndex(c.masterIndex)

	return hints, errs
}