// Walk walks the storage and calls the walker callback with each blobref.
// It stops if the walker returns a non-nil error, and returns that error.
func (s *storage) Walk(ctx *context.Context, walker func(packID int, ref blob.Ref, offset int64, size uint32) error) error {
	// TODO(tgulacsi): proper verbose flag from context
	verbose := env.IsDebug()
	// Visit pack files 0, 1, 2, ... until the next numbered file does not exist.
	for i := 0; i >= 0; i++ {
		fh, err := os.Open(s.filename(i))
		if err != nil {
			if os.IsNotExist(err) {
				break
			}
			return err
		}
		fh.Close()
		if err = s.walkPack(verbose, i, walker); err != nil {
			return err
		}
	}
	return nil
}
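// The sketch below is illustrative only (not part of the original package): a
// caller that tallies blob count and total size via Walk. It assumes nothing
// beyond the Walk signature above; nil is passed for ctx because the Walk body
// shown does not use it, so adjust that if the context package requires a value.
func countBlobs(s *storage) (n int, total int64, err error) {
	err = s.Walk(nil, func(packID int, ref blob.Ref, offset int64, size uint32) error {
		n++
		total += int64(size)
		return nil
	})
	return
}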
func newTempDiskpackedWithIndex(t *testing.T, indexConf jsonconfig.Obj) (sto blobserver.Storage, cleanup func()) {
	restoreLogging := test.TLog(t)
	dir, err := ioutil.TempDir("", "diskpacked-test")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("diskpacked test dir is %q", dir)
	s, err := newStorage(dir, 1<<20, indexConf)
	if err != nil {
		t.Fatalf("newStorage: %v", err)
	}
	return s, func() {
		s.Close()
		if env.IsDebug() {
			t.Logf("CAMLI_DEBUG set, skipping cleanup of dir %q", dir)
		} else {
			os.RemoveAll(dir)
		}
		restoreLogging()
	}
}
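// A hypothetical test sketch (not from the original source) showing typical
// use of the helper above: the "type": "memory" index configuration is an
// assumption about the sorted.KeyValue registry, and cleanup is deferred so
// the temp dir is removed (unless CAMLI_DEBUG is set) even if the test fails.
func TestDiskpackedWithMemoryIndex(t *testing.T) {
	sto, cleanup := newTempDiskpackedWithIndex(t, jsonconfig.Obj{"type": "memory"})
	defer cleanup()
	_ = sto // exercise sto with blobserver store/fetch tests here
}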
func (s *storage) reindexOne(ctx *context.Context, index sorted.KeyValue, overwrite bool, packID int) error {
	var batch sorted.BatchMutation
	if overwrite {
		batch = index.BeginBatch()
	}
	allOk := true

	// TODO(tgulacsi): proper verbose from context
	verbose := env.IsDebug()

	// misses collects blobs whose index entry points elsewhere; a later
	// duplicate of the blob in this pack may still match the index
	// (see the delete below).
	misses := make(map[blob.Ref]string, 8)
	err := s.walkPack(verbose, packID,
		func(packID int, ref blob.Ref, offset int64, size uint32) error {
			if !ref.Valid() {
				if verbose {
					log.Printf("found deleted blob in %d at %d with size %d", packID, offset, size)
				}
				return nil
			}
			meta := blobMeta{packID, offset, size}.String()
			if overwrite && batch != nil {
				batch.Set(ref.String(), meta)
				return nil
			}
			if _, ok := misses[ref]; ok {
				// maybe this is the last of this blob.
				delete(misses, ref)
			}
			if old, err := index.Get(ref.String()); err != nil {
				allOk = false
				if err == sorted.ErrNotFound {
					log.Println(ref.String() + ": cannot find in index!")
				} else {
					log.Println(ref.String()+": error getting from index: ", err.Error())
				}
			} else if old != meta {
				if old > meta {
					misses[ref] = meta
					log.Printf("WARN: possible duplicate blob %s", ref.String())
				} else {
					allOk = false
					log.Printf("ERROR: index mismatch for %s - index=%s, meta=%s!", ref.String(), old, meta)
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	for ref, meta := range misses {
		log.Printf("ERROR: index mismatch for %s (%s)!", ref.String(), meta)
		allOk = false
	}

	if overwrite && batch != nil {
		if err := index.CommitBatch(batch); err != nil {
			return err
		}
	} else if !allOk {
		return fmt.Errorf("index does not match data in %d", packID)
	}
	return nil
}
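// A sketch of a whole-storage reindex driver; this is an assumed wrapper, not
// necessarily the package's real entry point. It reuses the pack-file
// iteration pattern from Walk above, calling reindexOne until the next
// numbered pack file is missing.
func (s *storage) reindexAll(ctx *context.Context, index sorted.KeyValue, overwrite bool) error {
	for i := 0; ; i++ {
		if _, err := os.Stat(s.filename(i)); err != nil {
			if os.IsNotExist(err) {
				return nil // no more pack files
			}
			return err
		}
		if err := s.reindexOne(ctx, index, overwrite, i); err != nil {
			return err
		}
	}
}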
		if err != nil {
			return nil, err
		}
		ss.BlobRef = br
		fr.ssmmu.Lock()
		defer fr.ssmmu.Unlock()
		fr.ssm[br] = ss
		return ss, nil
	})
	if err != nil {
		return nil, err
	}
	return ssi.(*superset), nil
}

var debug = env.IsDebug()

// readerForOffset returns a ReadCloser that reads some number of bytes and then EOF
// from the provided offset. Seeing EOF doesn't mean the end of the whole file; just the
// chunk at that offset. The caller must close the ReadCloser when done reading.
func (fr *FileReader) readerForOffset(off int64) (io.ReadCloser, error) {
	if debug {
		log.Printf("(%p) readerForOffset %d + %d = %d", fr, fr.rootOff, off, fr.rootOff+off)
	}
	if off < 0 {
		panic("negative offset")
	}
	if off >= fr.size {
		return types.EmptyBody, nil
	}
	offRemain := off