// New returns a wsync.Pool for the given container rooted at basePath:
// a filesystem-backed pool when basePath is a directory (or /dev/null),
// or a zip-backed pool when basePath points to a zip archive.
func New(c *tlc.Container, basePath string) (wsync.Pool, error) {
	if basePath == "/dev/null" {
		return fspool.New(c, basePath), nil
	}

	fr, err := eos.Open(basePath)
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	targetInfo, err := fr.Stat()
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	if targetInfo.IsDir() {
		// directories are served straight from the filesystem,
		// so the handle we just opened is no longer needed
		err := fr.Close()
		if err != nil {
			return nil, err
		}

		return fspool.New(c, basePath), nil
	}

	// not a directory: treat it as a zip archive
	zr, err := zip.NewReader(fr, targetInfo.Size())
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	return zippool.New(c, zr), nil
}
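// newPoolExample is an illustrative sketch, not part of the original source:
// it shows that New picks the backend transparently, so callers treat a
// directory and a zip archive the same way. The path is hypothetical, and
// pool.Close coming from the wsync.Pool interface is an assumption here.
func newPoolExample(c *tlc.Container) error {
	// works the same whether the path is a directory or a .zip archive
	pool, err := New(c, "path/to/build-or-archive.zip")
	if err != nil {
		return err
	}
	defer pool.Close()

	return nil
}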
// ExtractZip extracts the zip archive read from readerAt into dir,
// reporting progress through settings.Consumer and, when settings.ResumeFrom
// is set, persisting the last fully-extracted file index so an interrupted
// extraction can be resumed.
func ExtractZip(readerAt io.ReaderAt, size int64, dir string, settings ExtractSettings) (*ExtractResult, error) {
	dirCount := 0
	regCount := 0
	symlinkCount := 0

	reader, err := zip.NewReader(readerAt, size)
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	var totalSize int64
	for _, file := range reader.File {
		totalSize += int64(file.UncompressedSize64)
	}

	var doneSize uint64
	lastDoneIndex := -1

	func() {
		if settings.ResumeFrom == "" {
			return
		}

		resBytes, resErr := ioutil.ReadFile(settings.ResumeFrom)
		if resErr != nil {
			if !errors.Is(resErr, os.ErrNotExist) {
				settings.Consumer.Warnf("Couldn't read resume file: %s", resErr.Error())
			}
			return
		}

		lastDone64, resErr := strconv.ParseInt(string(resBytes), 10, 64)
		if resErr != nil {
			settings.Consumer.Warnf("Couldn't parse resume file: %s", resErr.Error())
			return
		}

		lastDoneIndex = int(lastDone64)
		settings.Consumer.Infof("Resuming from file %d", lastDoneIndex)
	}()

	warnedAboutWrite := false
	writeProgress := func(fileIndex int) {
		if settings.ResumeFrom == "" {
			return
		}

		payload := fmt.Sprintf("%d", fileIndex)
		wErr := ioutil.WriteFile(settings.ResumeFrom, []byte(payload), 0644)
		if wErr != nil {
			if !warnedAboutWrite {
				warnedAboutWrite = true
				settings.Consumer.Warnf("Couldn't save resume file: %s", wErr.Error())
			}
			return
		}
	}

	defer func() {
		if settings.ResumeFrom == "" {
			return
		}

		rErr := os.Remove(settings.ResumeFrom)
		if rErr != nil {
			settings.Consumer.Warnf("Couldn't remove resume file: %s", rErr.Error())
		}
	}()

	if settings.OnUncompressedSizeKnown != nil {
		settings.OnUncompressedSizeKnown(totalSize)
	}

	windows := runtime.GOOS == "windows"

	for fileIndex, file := range reader.File {
		if fileIndex <= lastDoneIndex {
			settings.Consumer.Debugf("Skipping file %d", fileIndex)
			doneSize += file.UncompressedSize64
			settings.Consumer.Progress(float64(doneSize) / float64(totalSize))
			continue
		}

		err = func() error {
			rel := file.Name
			filename := path.Join(dir, filepath.FromSlash(rel))

			info := file.FileInfo()
			mode := info.Mode()

			if info.IsDir() {
				err = Mkdir(filename)
				if err != nil {
					return errors.Wrap(err, 1)
				}
				dirCount++
			} else if mode&os.ModeSymlink > 0 && !windows {
				fileReader, fErr := file.Open()
				if fErr != nil {
					return errors.Wrap(fErr, 1)
				}
				defer fileReader.Close()

				linkname, lErr := ioutil.ReadAll(fileReader)
				if lErr != nil {
					return errors.Wrap(lErr, 1)
				}

				lErr = Symlink(string(linkname), filename, settings.Consumer)
				if lErr != nil {
					return errors.Wrap(lErr, 1)
				}
				symlinkCount++
			} else {
				regCount++

				fileReader, fErr := file.Open()
				if fErr != nil {
					return errors.Wrap(fErr, 1)
				}
				defer fileReader.Close()

				settings.Consumer.Debugf("extract %s", filename)
				countingReader := counter.NewReaderCallback(func(offset int64) {
					currentSize := int64(doneSize) + offset
					settings.Consumer.Progress(float64(currentSize) / float64(totalSize))
				}, fileReader)

				err = CopyFile(filename, os.FileMode(mode&LuckyMode|ModeMask), countingReader)
				if err != nil {
					return errors.Wrap(err, 1)
				}
			}

			return nil
		}()
		if err != nil {
			return nil, errors.Wrap(err, 1)
		}

		doneSize += file.UncompressedSize64
		settings.Consumer.Progress(float64(doneSize) / float64(totalSize))
		writeProgress(fileIndex)
	}

	return &ExtractResult{
		Dirs:     dirCount,
		Files:    regCount,
		Symlinks: symlinkCount,
	}, nil
}
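// extractExample is an illustrative sketch, not part of the original source:
// it wires ExtractZip to a handle opened with eos, using only the settings the
// function above actually reads (Consumer, ResumeFrom). The consumer type
// (*state.Consumer) and all paths are assumptions.
func extractExample(consumer *state.Consumer) error {
	f, err := eos.Open("upload.zip")
	if err != nil {
		return err
	}
	defer f.Close()

	stats, err := f.Stat()
	if err != nil {
		return err
	}

	// ResumeFrom makes a re-run skip every file index recorded as done
	// by a previous, interrupted extraction
	res, err := ExtractZip(f, stats.Size(), "staging-dir", ExtractSettings{
		Consumer:   consumer,
		ResumeFrom: "staging-dir.resume",
	})
	if err != nil {
		return err
	}

	consumer.Infof("extracted %d dirs, %d files, %d symlinks", res.Dirs, res.Files, res.Symlinks)
	return nil
}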
// Do starts receiving from the wounds channel and healing
func (ah *ArchiveHealer) Do(container *tlc.Container, wounds chan *Wound) error {
	ah.container = container

	files := make(map[int64]bool)
	fileIndices := make(chan int64)

	if ah.NumWorkers == 0 {
		ah.NumWorkers = runtime.NumCPU() + 1
	}

	defer ah.File.Close()

	stat, err := ah.File.Stat()
	if err != nil {
		return err
	}

	zipReader, err := zip.NewReader(ah.File, stat.Size())
	if err != nil {
		return errors.Wrap(err, 1)
	}

	targetPool := fspool.New(container, ah.Target)

	errs := make(chan error)
	done := make(chan bool, ah.NumWorkers)
	cancelled := make(chan struct{})

	onChunkHealed := func(healedChunk int64) {
		atomic.AddInt64(&ah.totalHealed, healedChunk)
		ah.updateProgress()
	}

	for i := 0; i < ah.NumWorkers; i++ {
		go ah.heal(container, zipReader, stat.Size(), targetPool, fileIndices, errs, done, cancelled, onChunkHealed)
	}

	processWound := func(wound *Wound) error {
		if !wound.Healthy() {
			ah.totalCorrupted += wound.Size()
			ah.hasWounds = true
		}

		switch wound.Kind {
		case WoundKind_DIR:
			dirEntry := container.Dirs[wound.Index]
			path := filepath.Join(ah.Target, filepath.FromSlash(dirEntry.Path))

			pErr := os.MkdirAll(path, 0755)
			if pErr != nil {
				return pErr
			}

		case WoundKind_SYMLINK:
			symlinkEntry := container.Symlinks[wound.Index]
			path := filepath.Join(ah.Target, filepath.FromSlash(symlinkEntry.Path))

			dir := filepath.Dir(path)
			pErr := os.MkdirAll(dir, 0755)
			if pErr != nil {
				return pErr
			}

			pErr = os.Symlink(symlinkEntry.Dest, path)
			if pErr != nil {
				return pErr
			}

		case WoundKind_FILE:
			if files[wound.Index] {
				// already queued
				return nil
			}

			file := container.Files[wound.Index]
			if ah.Consumer != nil {
				ah.Consumer.ProgressLabel(file.Path)
			}

			atomic.AddInt64(&ah.totalHealing, file.Size)
			ah.updateProgress()
			files[wound.Index] = true

			select {
			case pErr := <-errs:
				return pErr
			case fileIndices <- wound.Index:
				// queued for work!
			}

		case WoundKind_CLOSED_FILE:
			if files[wound.Index] {
				// already healing whole file
			} else {
				fileSize := container.Files[wound.Index].Size

				// whole file was healthy
				if wound.End == fileSize {
					atomic.AddInt64(&ah.totalHealthy, fileSize)
				}
			}

		default:
			return fmt.Errorf("unknown wound kind: %d", wound.Kind)
		}

		return nil
	}

	for wound := range wounds {
		err = processWound(wound)
		if err != nil {
			close(fileIndices)
			close(cancelled)
			return errors.Wrap(err, 1)
		}
	}

	// queued everything
	close(fileIndices)

	// expecting up to NumWorkers done, some may still
	// send errors
	for i := 0; i < ah.NumWorkers; i++ {
		select {
		case err = <-errs:
			close(cancelled)
			return errors.Wrap(err, 1)
		case <-done:
			// good!
		}
	}

	return nil
}
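// healExample is an illustrative sketch, not part of the original source: it
// shows how Do consumes wounds produced elsewhere. Only fields that Do reads
// above are set (Target, File, Consumer); the wound producer, the paths, the
// consumer type and whether File accepts an eos handle are all assumptions.
func healExample(container *tlc.Container, consumer *state.Consumer) error {
	archive, err := eos.Open("known-good-build.zip")
	if err != nil {
		return err
	}
	// no explicit Close here: Do defers ah.File.Close() itself

	healer := &ArchiveHealer{
		Target:   "possibly-corrupted-install",
		File:     archive,
		Consumer: consumer,
	}

	wounds := make(chan *Wound)
	go func() {
		// a validating pass would send *Wound values here;
		// closing the channel tells Do that everything was queued
		close(wounds)
	}()

	return healer.Do(container, wounds)
}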
func file(path string) {
	reader, err := eos.Open(path)
	must(err)
	defer reader.Close()

	stats, err := reader.Stat()
	if os.IsNotExist(err) {
		comm.Dief("%s: no such file or directory", path)
	}
	must(err)

	if stats.IsDir() {
		comm.Logf("%s: directory", path)
		return
	}

	if stats.Size() == 0 {
		comm.Logf("%s: empty file. peaceful.", path)
		return
	}

	prettySize := humanize.IBytes(uint64(stats.Size()))

	var magic int32
	must(binary.Read(reader, wire.Endianness, &magic))

	switch magic {
	case pwr.PatchMagic:
		{
			ph := &pwr.PatchHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(ph))

			rctx, err = pwr.DecompressWire(rctx, ph.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container)) // target container
			container.Reset()
			must(rctx.ReadMessage(container)) // source container

			comm.Logf("%s: %s wharf patch file (%s) with %s", path, prettySize, ph.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/patch",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.SignatureMagic:
		{
			sh := &pwr.SignatureHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(sh))

			rctx, err = pwr.DecompressWire(rctx, sh.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			comm.Logf("%s: %s wharf signature file (%s) with %s", path, prettySize, sh.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/signature",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.ManifestMagic:
		{
			mh := &pwr.ManifestHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(mh))

			rctx, err = pwr.DecompressWire(rctx, mh.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			comm.Logf("%s: %s wharf manifest file (%s) with %s", path, prettySize, mh.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/manifest",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.WoundsMagic:
		{
			wh := &pwr.WoundsHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(wh))

			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			files := make(map[int64]bool)
			totalWounds := int64(0)

			for {
				wound := &pwr.Wound{}
				err = rctx.ReadMessage(wound)
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					} else {
						must(err)
					}
				}

				if wound.Kind == pwr.WoundKind_FILE {
					totalWounds += (wound.End - wound.Start)
					files[wound.Index] = true
				}
			}

			comm.Logf("%s: %s wharf wounds file with %s, %s wounds in %d files", path, prettySize, container.Stats(),
				humanize.IBytes(uint64(totalWounds)), len(files))
			comm.Result(ContainerResult{
				Type: "wharf/wounds",
			})
		}

	default:
		_, err := reader.Seek(0, os.SEEK_SET)
		must(err)

		wasZip := func() bool {
			zr, err := zip.NewReader(reader, stats.Size())
			if err != nil {
				if err != zip.ErrFormat {
					must(err)
				}
				return false
			}

			container, err := tlc.WalkZip(zr, func(fi os.FileInfo) bool { return true })
			must(err)

			comm.Logf("%s: %s zip file with %s", path, prettySize, container.Stats())
			comm.Result(ContainerResult{
				Type:             "zip",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
			return true
		}()

		if !wasZip {
			comm.Logf("%s: not sure - try the file(1) command if your system has it!", path)
		}
	}
}
func ls(path string) {
	reader, err := eos.Open(path)
	must(err)
	defer reader.Close()

	stats, err := reader.Stat()
	if os.IsNotExist(err) {
		comm.Dief("%s: no such file or directory", path)
	}
	must(err)

	if stats.IsDir() {
		comm.Logf("%s: directory", path)
		return
	}

	if stats.Size() == 0 {
		comm.Logf("%s: empty file. peaceful.", path)
		return
	}

	log := func(line string) {
		comm.Logf(line)
	}

	var magic int32
	must(binary.Read(reader, wire.Endianness, &magic))

	switch magic {
	case pwr.PatchMagic:
		{
			h := &pwr.PatchHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			log("pre-patch container:")
			container.Print(log)

			container.Reset()
			must(rctx.ReadMessage(container))
			log("================================")
			log("post-patch container:")
			container.Print(log)
		}

	case pwr.SignatureMagic:
		{
			h := &pwr.SignatureHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)
		}

	case pwr.ManifestMagic:
		{
			h := &pwr.ManifestHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)
		}

	case pwr.WoundsMagic:
		{
			wh := &pwr.WoundsHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(wh))

			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)

			for {
				wound := &pwr.Wound{}
				err = rctx.ReadMessage(wound)
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					} else {
						must(err)
					}
				}

				comm.Logf(wound.PrettyString(container))
			}
		}

	default:
		_, err := reader.Seek(0, os.SEEK_SET)
		must(err)

		wasZip := func() bool {
			zr, err := zip.NewReader(reader, stats.Size())
			if err != nil {
				if err != zip.ErrFormat {
					must(err)
				}
				return false
			}

			container, err := tlc.WalkZip(zr, func(fi os.FileInfo) bool { return true })
			must(err)

			container.Print(log)
			return true
		}()

		if !wasZip {
			comm.Logf("%s: not sure - try the file(1) command if your system has it!", path)
		}
	}
}