func ExtractPath(archive string, destPath string, settings ExtractSettings) (*ExtractResult, error) { var result *ExtractResult var err error file, err := eos.Open(archive) if err != nil { return nil, errors.Wrap(err, 1) } stat, err := file.Stat() if err != nil { return nil, errors.Wrap(err, 1) } defer func() { if cErr := file.Close(); cErr != nil && err == nil { err = errors.Wrap(cErr, 1) } }() result, err = Extract(file, stat.Size(), destPath, settings) if err != nil { return nil, errors.Wrap(err, 1) } return result, nil }
// WalkAny tries to retrieve container information on containerPath. It supports: // the empty container (/dev/null), local directories, zip archives func WalkAny(containerPath string, filter FilterFunc) (*Container, error) { // empty container case if containerPath == NullPath { return &Container{}, nil } file, err := eos.Open(containerPath) if err != nil { return nil, errors.Wrap(err, 1) } defer file.Close() stat, err := file.Stat() if err != nil { return nil, errors.Wrap(err, 1) } if stat.IsDir() { if err != nil { return nil, errors.Wrap(err, 1) } // local directory case return WalkDir(containerPath, filter) } // zip archive case zr, err := zip.NewReader(file, stat.Size()) if err != nil { return nil, errors.Wrap(err, 1) } return WalkZip(zr, filter) }
// NewHealer takes a spec of the form "type,url", and a target folder // and returns a healer that knows how to repair target from spec. func NewHealer(spec string, target string) (Healer, error) { tokens := strings.SplitN(spec, ",", 2) if len(tokens) != 2 { return nil, fmt.Errorf("Invalid healer spec: expected 'type,url' but got '%s'", spec) } healerType := tokens[0] healerURL := tokens[1] switch healerType { case "archive": file, err := eos.Open(healerURL) if err != nil { return nil, errors.Wrap(err, 1) } ah := &ArchiveHealer{ File: file, Target: target, } return ah, nil case "manifest": return nil, fmt.Errorf("Manifest healer: stub") } return nil, fmt.Errorf("Unknown healer type %s", healerType) }
func New(c *tlc.Container, basePath string) (wsync.Pool, error) { if basePath == "/dev/null" { return fspool.New(c, basePath), nil } fr, err := eos.Open(basePath) if err != nil { return nil, errors.Wrap(err, 1) } targetInfo, err := fr.Stat() if err != nil { return nil, errors.Wrap(err, 1) } if targetInfo.IsDir() { err := fr.Close() if err != nil { return nil, err } return fspool.New(c, basePath), nil } else { zr, err := zip.NewReader(fr, targetInfo.Size()) if err != nil { return nil, errors.Wrap(err, 1) } return zippool.New(c, zr), nil } }
func doVerify(signaturePath string, dir string, woundsPath string, healPath string) error { if woundsPath == "" { if healPath == "" { comm.Opf("Verifying %s", dir) } else { comm.Opf("Verifying %s, healing as we go", dir) } } else { if healPath == "" { comm.Opf("Verifying %s, writing wounds to %s", dir, woundsPath) } else { comm.Dief("Options --wounds and --heal cannot be used at the same time") } } startTime := time.Now() signatureReader, err := eos.Open(signaturePath) if err != nil { return errors.Wrap(err, 1) } defer signatureReader.Close() signature, err := pwr.ReadSignature(signatureReader) if err != nil { return errors.Wrap(err, 1) } vc := &pwr.ValidatorContext{ Consumer: comm.NewStateConsumer(), WoundsPath: woundsPath, HealPath: healPath, } comm.StartProgressWithTotalBytes(signature.Container.Size) err = vc.Validate(dir, signature) if err != nil { return errors.Wrap(err, 1) } comm.EndProgress() prettySize := humanize.IBytes(uint64(signature.Container.Size)) perSecond := humanize.IBytes(uint64(float64(signature.Container.Size) / time.Since(startTime).Seconds())) comm.Statf("%s (%s) @ %s/s\n", prettySize, signature.Container.Stats(), perSecond) if vc.WoundsConsumer.HasWounds() { if healer, ok := vc.WoundsConsumer.(pwr.Healer); ok { comm.Statf("%s corrupted data found, %s healed", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted())), humanize.IBytes(uint64(healer.TotalHealed()))) } else { comm.Dief("%s corrupted data found", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted()))) } } return nil }
func doProbe(patch string) error { patchReader, err := eos.Open(patch) if err != nil { return err } defer patchReader.Close() stats, err := patchReader.Stat() if err != nil { return err } comm.Statf("patch: %s", humanize.IBytes(uint64(stats.Size()))) rctx := wire.NewReadContext(patchReader) err = rctx.ExpectMagic(pwr.PatchMagic) if err != nil { return err } header := &pwr.PatchHeader{} err = rctx.ReadMessage(header) if err != nil { return err } rctx, err = pwr.DecompressWire(rctx, header.Compression) if err != nil { return err } target := &tlc.Container{} err = rctx.ReadMessage(target) if err != nil { return err } source := &tlc.Container{} err = rctx.ReadMessage(source) if err != nil { return err } comm.Statf("target: %s in %s", humanize.IBytes(uint64(target.Size)), target.Stats()) comm.Statf("source: %s in %s", humanize.IBytes(uint64(target.Size)), source.Stats()) var patchStats []patchStat sh := &pwr.SyncHeader{} rop := &pwr.SyncOp{} for fileIndex, f := range source.Files { stat := patchStat{ fileIndex: int64(fileIndex), freshData: f.Size, } sh.Reset() err = rctx.ReadMessage(sh) if err != nil { return err } if sh.FileIndex != int64(fileIndex) { return fmt.Errorf("malformed patch: expected file %d, got %d", fileIndex, sh.FileIndex) } readingOps := true var pos int64 for readingOps { rop.Reset() err = rctx.ReadMessage(rop) if err != nil { return err } switch rop.Type { case pwr.SyncOp_BLOCK_RANGE: fixedSize := (rop.BlockSpan - 1) * pwr.BlockSize lastIndex := rop.BlockIndex + (rop.BlockSpan - 1) lastSize := pwr.ComputeBlockSize(f.Size, lastIndex) totalSize := (fixedSize + lastSize) stat.freshData -= totalSize pos += totalSize case pwr.SyncOp_DATA: totalSize := int64(len(rop.Data)) if *appArgs.verbose { comm.Debugf("%s fresh data at %s (%d-%d)", humanize.IBytes(uint64(totalSize)), humanize.IBytes(uint64(pos)), pos, pos+totalSize) } pos += totalSize case pwr.SyncOp_HEY_YOU_DID_IT: readingOps = false } } patchStats = append(patchStats, stat) } 
sort.Sort(byDecreasingFreshData(patchStats)) var totalFresh int64 for _, stat := range patchStats { totalFresh += stat.freshData } var eightyFresh = int64(0.8 * float64(totalFresh)) var printedFresh int64 comm.Opf("80%% of fresh data is in the following files:") for _, stat := range patchStats { f := source.Files[stat.fileIndex] comm.Logf("%s in %s (%.2f%% changed)", humanize.IBytes(uint64(stat.freshData)), f.Path, float64(stat.freshData)/float64(f.Size)*100.0) printedFresh += stat.freshData if printedFresh >= eightyFresh { break } } return nil }
func doCp(srcPath string, destPath string, resume bool) error { src, err := eos.Open(srcPath) if err != nil { return err } defer src.Close() dir := filepath.Dir(destPath) err = os.MkdirAll(dir, 0755) if err != nil { return err } flags := os.O_CREATE | os.O_WRONLY dest, err := os.OpenFile(destPath, flags, 0644) if err != nil { return err } defer dest.Close() stats, err := src.Stat() if err != nil { return err } totalBytes := int64(stats.Size()) startOffset := int64(0) if resume { startOffset, err = dest.Seek(0, os.SEEK_END) if err != nil { return err } if startOffset == 0 { comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes))) } else if startOffset > totalBytes { comm.Logf("Existing data too big (%s > %s), starting over", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes))) } else if startOffset == totalBytes { comm.Logf("All %s already there", humanize.IBytes(uint64(totalBytes))) return nil } comm.Logf("Resuming at %s / %s", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes))) _, err = src.Seek(startOffset, os.SEEK_SET) if err != nil { return err } } else { comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes))) } start := time.Now() comm.Progress(float64(startOffset) / float64(totalBytes)) comm.StartProgressWithTotalBytes(totalBytes) cw := counter.NewWriterCallback(func(count int64) { alpha := float64(startOffset+count) / float64(totalBytes) comm.Progress(alpha) }, dest) copiedBytes, err := io.Copy(cw, src) if err != nil { return err } comm.EndProgress() totalDuration := time.Since(start) prettyStartOffset := humanize.IBytes(uint64(startOffset)) prettySize := humanize.IBytes(uint64(copiedBytes)) perSecond := humanize.IBytes(uint64(float64(totalBytes-startOffset) / totalDuration.Seconds())) comm.Statf("%s + %s copied @ %s/s\n", prettyStartOffset, prettySize, perSecond) return nil }
func TestAllTheThings(t *testing.T) { perm := os.FileMode(0777) workingDir, err := ioutil.TempDir("", "butler-tests") mist(t, err) defer os.RemoveAll(workingDir) sample := path.Join(workingDir, "sample") mist(t, os.MkdirAll(sample, perm)) mist(t, ioutil.WriteFile(path.Join(sample, "hello.txt"), []byte("hello!"), perm)) sample2 := path.Join(workingDir, "sample2") mist(t, os.MkdirAll(sample2, perm)) for i := 0; i < 5; i++ { if i == 3 { // e.g. .gitkeep putfile(t, sample2, i, []byte{}) } else { putfile(t, sample2, i, bytes.Repeat([]byte{0x42, 0x69}, i*200+1)) } } sample3 := path.Join(workingDir, "sample3") mist(t, os.MkdirAll(sample3, perm)) for i := 0; i < 60; i++ { putfile(t, sample3, i, bytes.Repeat([]byte{0x42, 0x69}, i*300+1)) } sample4 := path.Join(workingDir, "sample4") mist(t, os.MkdirAll(sample4, perm)) for i := 0; i < 120; i++ { putfile(t, sample4, i, bytes.Repeat([]byte{0x42, 0x69}, i*150+1)) } sample5 := path.Join(workingDir, "sample5") mist(t, os.MkdirAll(sample5, perm)) rg := rand.New(rand.NewSource(0x239487)) for i := 0; i < 25; i++ { l := 1024 * (i + 2) // our own little twist on fizzbuzz to look out for 1-off errors if i%5 == 0 { l = int(pwr.BlockSize) } else if i%3 == 0 { l = 0 } buf := make([]byte, l) _, err := io.CopyN(bytes.NewBuffer(buf), rg, int64(l)) mist(t, err) putfile(t, sample5, i, buf) } files := map[string]string{ "hello": sample, "80-fixed": sample2, "60-fixed": sample3, "120-fixed": sample4, "random": sample5, "null": "/dev/null", } patch := path.Join(workingDir, "patch.pwr") comm.Configure(true, true, false, false, false, false, false) for _, q := range []int{1, 9} { t.Logf("============ Quality %d ============", q) compression := pwr.CompressionSettings{ Algorithm: pwr.CompressionAlgorithm_BROTLI, Quality: int32(q), } for lhs := range files { for rhs := range files { mist(t, doDiff(files[lhs], files[rhs], patch, compression)) stat, err := os.Lstat(patch) mist(t, err) t.Logf("%10s -> %10s = %s", lhs, rhs, 
humanize.IBytes(uint64(stat.Size()))) } } } compression := pwr.CompressionSettings{ Algorithm: pwr.CompressionAlgorithm_BROTLI, Quality: 1, } for _, filepath := range files { t.Logf("Signing %s\n", filepath) sigPath := path.Join(workingDir, "signature.pwr.sig") mist(t, doSign(filepath, sigPath, compression, false)) sigReader, err := eos.Open(sigPath) mist(t, err) signature, err := pwr.ReadSignature(sigReader) mist(t, err) mist(t, sigReader.Close()) validator := &pwr.ValidatorContext{ FailFast: true, } mist(t, validator.Validate(filepath, signature)) } // K windows you just sit this one out we'll catch you on the flip side if runtime.GOOS != "windows" { // In-place preserve permissions tests t.Logf("In-place patching should preserve permissions") eperm := os.FileMode(0750) samplePerm1 := path.Join(workingDir, "samplePerm1") mist(t, os.MkdirAll(samplePerm1, perm)) putfileEx(t, samplePerm1, 1, bytes.Repeat([]byte{0x42, 0x69}, 8192), eperm) assert.Equal(t, octal(eperm), octal(permFor(t, path.Join(samplePerm1, "dummy1.dat")))) samplePerm2 := path.Join(workingDir, "samplePerm2") mist(t, os.MkdirAll(samplePerm2, perm)) putfileEx(t, samplePerm2, 1, bytes.Repeat([]byte{0x69, 0x42}, 16384), eperm) assert.Equal(t, octal(eperm), octal(permFor(t, path.Join(samplePerm2, "dummy1.dat")))) mist(t, doDiff(samplePerm1, samplePerm2, patch, compression)) _, err := os.Lstat(patch) mist(t, err) cave := path.Join(workingDir, "cave") ditto(samplePerm1, cave) mist(t, doApply(patch, cave, cave, true, "", "")) assert.Equal(t, octal(eperm|pwr.ModeMask), octal(permFor(t, path.Join(cave, "dummy1.dat")))) } }
func doApply(patch string, target string, output string, inplace bool, signaturePath string, woundsPath string) error { if output == "" { output = target } target = path.Clean(target) output = path.Clean(output) if output == target { if !inplace { comm.Dief("Refusing to destructively patch %s without --inplace", output) } } if signaturePath == "" { comm.Opf("Patching %s", output) } else { comm.Opf("Patching %s with validation", output) } startTime := time.Now() patchReader, err := eos.Open(patch) if err != nil { return errors.Wrap(err, 1) } var signature *pwr.SignatureInfo if signaturePath != "" { sigReader, sigErr := eos.Open(signaturePath) if sigErr != nil { return errors.Wrap(sigErr, 1) } defer sigReader.Close() signature, sigErr = pwr.ReadSignature(sigReader) if sigErr != nil { return errors.Wrap(sigErr, 1) } } actx := &pwr.ApplyContext{ TargetPath: target, OutputPath: output, InPlace: inplace, Signature: signature, WoundsPath: woundsPath, Consumer: comm.NewStateConsumer(), } comm.StartProgress() err = actx.ApplyPatch(patchReader) if err != nil { return errors.Wrap(err, 1) } comm.EndProgress() container := actx.SourceContainer prettySize := humanize.IBytes(uint64(container.Size)) perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds())) if actx.InPlace { statStr := "" if actx.Stats.TouchedFiles > 0 { statStr += fmt.Sprintf("patched %d, ", actx.Stats.TouchedFiles) } if actx.Stats.MovedFiles > 0 { statStr += fmt.Sprintf("renamed %d, ", actx.Stats.MovedFiles) } if actx.Stats.DeletedFiles > 0 { statStr += fmt.Sprintf("deleted %d, ", actx.Stats.DeletedFiles) } comm.Statf("%s (%s stage)", statStr, humanize.IBytes(uint64(actx.Stats.StageSize))) } comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond) if actx.WoundsConsumer != nil && actx.WoundsConsumer.HasWounds() { extra := "" if actx.WoundsPath != "" { extra = fmt.Sprintf(" (written to %s)", actx.WoundsPath) } totalCorrupted := 
actx.WoundsConsumer.TotalCorrupted() comm.Logf("Result has wounds, %s corrupted data%s", humanize.IBytes(uint64(totalCorrupted)), extra) } return nil }
// doDiff computes a wharf patch (and companion .sig signature file)
// transforming `target` into `source`, writing them to `patch` and
// `patch + ".sig"`. `target` may be a container (directory, archive)
// or a pre-computed signature file; `source` must be a container.
//
// NOTE: the single `err` variable is deliberately reused — the failure
// of tlc.WalkAny on `target` is what routes us into the
// "signature file perhaps?" fallback branch. Statement order matters.
func doDiff(target string, source string, patch string, compression pwr.CompressionSettings) error {
	var err error

	startTime := time.Now()

	targetSignature := &pwr.SignatureInfo{}
	targetSignature.Container, err = tlc.WalkAny(target, filterPaths)
	if err != nil {
		// Signature file perhaps?
		var signatureReader io.ReadCloser

		signatureReader, err = eos.Open(target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			if errors.Is(err, wire.ErrFormat) {
				// neither walkable nor a signature: nothing we can diff against
				return fmt.Errorf("unrecognized target %s (not a container, not a signature file)", target)
			}
			return errors.Wrap(err, 1)
		}

		comm.Opf("Read signature from %s", target)

		err = signatureReader.Close()
		if err != nil {
			return errors.Wrap(err, 1)
		}
	} else {
		// Container (dir, archive, etc.)
		comm.Opf("Hashing %s", target)

		comm.StartProgress()
		var targetPool wsync.Pool
		targetPool, err = pools.New(targetSignature.Container, target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		// compute the target's signature ourselves since we weren't given one
		targetSignature.Hashes, err = pwr.ComputeSignature(targetSignature.Container, targetPool, comm.NewStateConsumer())
		comm.EndProgress()
		if err != nil {
			return errors.Wrap(err, 1)
		}

		{
			prettySize := humanize.IBytes(uint64(targetSignature.Container.Size))
			perSecond := humanize.IBytes(uint64(float64(targetSignature.Container.Size) / time.Since(startTime).Seconds()))
			comm.Statf("%s (%s) @ %s/s\n", prettySize, targetSignature.Container.Stats(), perSecond)
		}
	}

	// reset the clock: from here on we time the diff itself
	startTime = time.Now()

	var sourceContainer *tlc.Container
	sourceContainer, err = tlc.WalkAny(source, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	var sourcePool wsync.Pool
	sourcePool, err = pools.New(sourceContainer, source)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	patchWriter, err := os.Create(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer patchWriter.Close()

	signaturePath := patch + ".sig"
	signatureWriter, err := os.Create(signaturePath)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	// counters let us report the written patch/signature sizes afterwards
	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	dctx := &pwr.DiffContext{
		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer:    comm.NewStateConsumer(),
		Compression: &compression,
	}

	comm.Opf("Diffing %s", source)
	comm.StartProgress()
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	totalDuration := time.Since(startTime)
	{
		prettySize := humanize.IBytes(uint64(sourceContainer.Size))
		perSecond := humanize.IBytes(uint64(float64(sourceContainer.Size) / totalDuration.Seconds()))
		comm.Statf("%s (%s) @ %s/s\n", prettySize, sourceContainer.Stats(), perSecond)
	}

	if *diffArgs.verify {
		// sanity-check the patch we just wrote by applying it to a temp dir
		// (apply dies on failure, so its result isn't checked here)
		tmpDir, err := ioutil.TempDir("", "pwr")
		if err != nil {
			return errors.Wrap(err, 1)
		}
		defer os.RemoveAll(tmpDir)

		apply(patch, target, tmpDir, false, signaturePath, "")
	}

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))

		comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		comm.Statf("%s patch (%.2f%% of the full size) in %s", prettyPatchSize, relToNew, totalDuration)
	}

	return nil
}
// Does not preserve users, nor permission, except the executable bit func ExtractTar(archive string, dir string, settings ExtractSettings) (*ExtractResult, error) { settings.Consumer.Infof("Extracting %s to %s", archive, dir) dirCount := 0 regCount := 0 symlinkCount := 0 file, err := eos.Open(archive) if err != nil { return nil, errors.Wrap(err, 1) } defer file.Close() err = Mkdir(dir) if err != nil { return nil, errors.Wrap(err, 1) } tarReader := tar.NewReader(file) for { header, err := tarReader.Next() if err != nil { if errors.Is(err, io.EOF) { break } return nil, errors.Wrap(err, 1) } rel := header.Name filename := path.Join(dir, filepath.FromSlash(rel)) switch header.Typeflag { case tar.TypeDir: err = Mkdir(filename) if err != nil { return nil, errors.Wrap(err, 1) } dirCount++ case tar.TypeReg: settings.Consumer.Debugf("extract %s", filename) err = CopyFile(filename, os.FileMode(header.Mode&LuckyMode|ModeMask), tarReader) if err != nil { return nil, errors.Wrap(err, 1) } regCount++ case tar.TypeSymlink: err = Symlink(header.Linkname, filename, settings.Consumer) if err != nil { return nil, errors.Wrap(err, 1) } symlinkCount++ default: return nil, fmt.Errorf("Unable to untar entry of type %d", header.Typeflag) } } return &ExtractResult{ Dirs: dirCount, Files: regCount, Symlinks: symlinkCount, }, nil }
// file identifies what kind of file lives at `path` by reading its
// magic number: a wharf patch, signature, manifest, or wounds file, a
// zip archive, or something we don't recognize. Results are both
// logged and emitted via comm.Result. Dies on unexpected errors.
func file(path string) {
	reader, err := eos.Open(path)
	must(err)
	defer reader.Close()

	stats, err := reader.Stat()
	if os.IsNotExist(err) {
		comm.Dief("%s: no such file or directory", path)
	}
	must(err)

	if stats.IsDir() {
		comm.Logf("%s: directory", path)
		return
	}

	if stats.Size() == 0 {
		comm.Logf("%s: empty file. peaceful.", path)
		return
	}

	prettySize := humanize.IBytes(uint64(stats.Size()))

	// the first 4 bytes tell us which wharf format (if any) this is
	var magic int32
	must(binary.Read(reader, wire.Endianness, &magic))

	switch magic {
	case pwr.PatchMagic:
		{
			ph := &pwr.PatchHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(ph))

			rctx, err = pwr.DecompressWire(rctx, ph.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container)) // target container
			container.Reset()
			must(rctx.ReadMessage(container)) // source container

			comm.Logf("%s: %s wharf patch file (%s) with %s", path, prettySize, ph.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/patch",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.SignatureMagic:
		{
			sh := &pwr.SignatureHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(sh))

			rctx, err = pwr.DecompressWire(rctx, sh.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			comm.Logf("%s: %s wharf signature file (%s) with %s", path, prettySize, sh.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/signature",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.ManifestMagic:
		{
			mh := &pwr.ManifestHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(mh))

			rctx, err = pwr.DecompressWire(rctx, mh.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			comm.Logf("%s: %s wharf manifest file (%s) with %s", path,
				prettySize, mh.GetCompression().ToString(), container.Stats())
			comm.Result(ContainerResult{
				Type:             "wharf/manifest",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
		}

	case pwr.WoundsMagic:
		{
			wh := &pwr.WoundsHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(wh))

			container := &tlc.Container{}
			must(rctx.ReadMessage(container))

			// tally total wounded bytes and the set of distinct wounded files
			files := make(map[int64]bool)
			totalWounds := int64(0)

			for {
				wound := &pwr.Wound{}

				err = rctx.ReadMessage(wound)
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					} else {
						must(err)
					}
				}

				if wound.Kind == pwr.WoundKind_FILE {
					totalWounds += (wound.End - wound.Start)
					files[wound.Index] = true
				}
			}

			comm.Logf("%s: %s wharf wounds file with %s, %s wounds in %d files", path, prettySize, container.Stats(),
				humanize.IBytes(uint64(totalWounds)), len(files))
			comm.Result(ContainerResult{
				Type: "wharf/wounds",
			})
		}

	default:
		// not a wharf format: rewind and try reading it as a zip archive
		_, err := reader.Seek(0, os.SEEK_SET)
		must(err)

		wasZip := func() bool {
			zr, err := zip.NewReader(reader, stats.Size())
			if err != nil {
				if err != zip.ErrFormat {
					must(err)
				}
				// ErrFormat just means "not a zip" — fall through quietly
				return false
			}

			container, err := tlc.WalkZip(zr, func(fi os.FileInfo) bool { return true })
			must(err)

			comm.Logf("%s: %s zip file with %s", path, prettySize, container.Stats())
			comm.Result(ContainerResult{
				Type:             "zip",
				NumFiles:         len(container.Files),
				NumDirs:          len(container.Dirs),
				NumSymlinks:      len(container.Symlinks),
				UncompressedSize: container.Size,
			})
			return true
		}()

		if !wasZip {
			comm.Logf("%s: not sure - try the file(1) command if your system has it!", path)
		}
	}
}
// ls prints the contents (container listings) of the wharf file at
// `path` — patch, signature, manifest, or wounds — falling back to a
// zip listing, like `file` but with full per-entry output. Dies on
// unexpected errors.
func ls(path string) {
	reader, err := eos.Open(path)
	must(err)
	defer reader.Close()

	stats, err := reader.Stat()
	if os.IsNotExist(err) {
		comm.Dief("%s: no such file or directory", path)
	}
	must(err)

	if stats.IsDir() {
		comm.Logf("%s: directory", path)
		return
	}

	if stats.Size() == 0 {
		comm.Logf("%s: empty file. peaceful.", path)
		return
	}

	// adapter so tlc.Container.Print can write through comm
	log := func(line string) {
		comm.Logf(line)
	}

	// the first 4 bytes tell us which wharf format (if any) this is
	var magic int32
	must(binary.Read(reader, wire.Endianness, &magic))

	switch magic {
	case pwr.PatchMagic:
		{
			h := &pwr.PatchHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container)) // target container
			log("pre-patch container:")
			container.Print(log)
			container.Reset()
			must(rctx.ReadMessage(container)) // source container
			log("================================")
			log("post-patch container:")
			container.Print(log)
		}

	case pwr.SignatureMagic:
		{
			h := &pwr.SignatureHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)
		}

	case pwr.ManifestMagic:
		{
			h := &pwr.ManifestHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(h))

			rctx, err = pwr.DecompressWire(rctx, h.GetCompression())
			must(err)
			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)
		}

	case pwr.WoundsMagic:
		{
			wh := &pwr.WoundsHeader{}
			rctx := wire.NewReadContext(reader)
			must(rctx.ReadMessage(wh))

			container := &tlc.Container{}
			must(rctx.ReadMessage(container))
			container.Print(log)

			// wounds are streamed until EOF; print each one
			for {
				wound := &pwr.Wound{}

				err = rctx.ReadMessage(wound)
				if err != nil {
					if errors.Is(err, io.EOF) {
						break
					} else {
						must(err)
					}
				}
				comm.Logf(wound.PrettyString(container))
			}
		}

	default:
		// not a wharf format: rewind and try reading it as a zip archive
		_, err := reader.Seek(0, os.SEEK_SET)
		must(err)

		wasZip := func() bool {
			zr, err := zip.NewReader(reader, stats.Size())
			if err != nil {
				if err != zip.ErrFormat {
					must(err)
				}
				// ErrFormat just means "not a zip" — fall through quietly
				return false
			}

			container, err := tlc.WalkZip(zr, func(fi os.FileInfo) bool { return true })
			must(err)
			container.Print(log)
			return true
		}()

		if !wasZip {
			comm.Logf("%s: not sure - try the file(1) command if your system has it!", path)
		}
	}
}