func doVerify(signaturePath string, dir string, woundsPath string, healPath string) error { if woundsPath == "" { if healPath == "" { comm.Opf("Verifying %s", dir) } else { comm.Opf("Verifying %s, healing as we go", dir) } } else { if healPath == "" { comm.Opf("Verifying %s, writing wounds to %s", dir, woundsPath) } else { comm.Dief("Options --wounds and --heal cannot be used at the same time") } } startTime := time.Now() signatureReader, err := eos.Open(signaturePath) if err != nil { return errors.Wrap(err, 1) } defer signatureReader.Close() signature, err := pwr.ReadSignature(signatureReader) if err != nil { return errors.Wrap(err, 1) } vc := &pwr.ValidatorContext{ Consumer: comm.NewStateConsumer(), WoundsPath: woundsPath, HealPath: healPath, } comm.StartProgressWithTotalBytes(signature.Container.Size) err = vc.Validate(dir, signature) if err != nil { return errors.Wrap(err, 1) } comm.EndProgress() prettySize := humanize.IBytes(uint64(signature.Container.Size)) perSecond := humanize.IBytes(uint64(float64(signature.Container.Size) / time.Since(startTime).Seconds())) comm.Statf("%s (%s) @ %s/s\n", prettySize, signature.Container.Stats(), perSecond) if vc.WoundsConsumer.HasWounds() { if healer, ok := vc.WoundsConsumer.(pwr.Healer); ok { comm.Statf("%s corrupted data found, %s healed", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted())), humanize.IBytes(uint64(healer.TotalHealed()))) } else { comm.Dief("%s corrupted data found", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted()))) } } return nil }
func doSign(output string, signature string, compression pwr.CompressionSettings, fixPerms bool) error { comm.Opf("Creating signature for %s", output) startTime := time.Now() container, err := tlc.WalkAny(output, filterPaths) if err != nil { return errors.Wrap(err, 1) } pool, err := pools.New(container, output) if err != nil { return errors.Wrap(err, 1) } if fixPerms { container.FixPermissions(pool) } signatureWriter, err := os.Create(signature) if err != nil { return errors.Wrap(err, 1) } rawSigWire := wire.NewWriteContext(signatureWriter) rawSigWire.WriteMagic(pwr.SignatureMagic) rawSigWire.WriteMessage(&pwr.SignatureHeader{ Compression: &compression, }) sigWire, err := pwr.CompressWire(rawSigWire, &compression) if err != nil { return errors.Wrap(err, 1) } sigWire.WriteMessage(container) comm.StartProgress() err = pwr.ComputeSignatureToWriter(container, pool, comm.NewStateConsumer(), func(hash wsync.BlockHash) error { return sigWire.WriteMessage(&pwr.BlockHash{ WeakHash: hash.WeakHash, StrongHash: hash.StrongHash, }) }) comm.EndProgress() if err != nil { return errors.Wrap(err, 1) } err = sigWire.Close() if err != nil { return errors.Wrap(err, 1) } prettySize := humanize.IBytes(uint64(container.Size)) perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds())) comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond) return nil }
func doUpgrade(head bool) error { if head { if !comm.YesNo("Do you want to upgrade to the bleeding-edge version? Things may break!") { comm.Logf("Okay, not upgrading. Bye!") return nil } return applyUpgrade("head", "head") } if version == "head" { comm.Statf("Bleeding-edge, not upgrading unless told to.") comm.Logf("(Use `--head` if you want to upgrade to the latest bleeding-edge version)") return nil } comm.Opf("Looking for upgrades...") currentVer, latestVer, err := queryLatestVersion() if err != nil { return fmt.Errorf("Version check failed: %s", err.Error()) } if latestVer == nil || currentVer.GTE(*latestVer) { comm.Statf("Your butler is up-to-date. Have a nice day!") return nil } comm.Statf("Current version: %s", currentVer.String()) comm.Statf("Latest version : %s", latestVer.String()) if !comm.YesNo("Do you want to upgrade now?") { comm.Logf("Okay, not upgrading. Bye!") return nil } must(applyUpgrade(currentVer.String(), latestVer.String())) return nil }
func doPush(buildPath string, specStr string, userVersion string, fixPerms bool) error { // start walking source container while waiting on auth flow sourceContainerChan := make(chan walkResult) walkErrs := make(chan error) go doWalk(buildPath, sourceContainerChan, walkErrs, fixPerms) spec, err := itchio.ParseSpec(specStr) if err != nil { return errors.Wrap(err, 1) } err = spec.EnsureChannel() if err != nil { return errors.Wrap(err, 1) } client, err := authenticateViaOauth() if err != nil { return errors.Wrap(err, 1) } newBuildRes, err := client.CreateBuild(spec.Target, spec.Channel, userVersion) if err != nil { return errors.Wrap(err, 1) } buildID := newBuildRes.Build.ID parentID := newBuildRes.Build.ParentBuild.ID var targetSignature *pwr.SignatureInfo if parentID == 0 { comm.Opf("For channel `%s`: pushing first build", spec.Channel) targetSignature = &pwr.SignatureInfo{ Container: &tlc.Container{}, Hashes: make([]wsync.BlockHash, 0), } } else { comm.Opf("For channel `%s`: last build is %d, downloading its signature", spec.Channel, parentID) var buildFiles itchio.ListBuildFilesResponse buildFiles, err = client.ListBuildFiles(parentID) if err != nil { return errors.Wrap(err, 1) } signatureFile := itchio.FindBuildFile(itchio.BuildFileType_SIGNATURE, buildFiles.Files) if signatureFile == nil { comm.Dief("Could not find signature for parent build %d, aborting", parentID) } var signatureReader io.Reader signatureReader, err = client.DownloadBuildFile(parentID, signatureFile.ID) if err != nil { return errors.Wrap(err, 1) } targetSignature, err = pwr.ReadSignature(signatureReader) if err != nil { return errors.Wrap(err, 1) } } newPatchRes, newSignatureRes, err := createBothFiles(client, buildID) if err != nil { return errors.Wrap(err, 1) } uploadDone := make(chan bool) uploadErrs := make(chan error) patchWriter, err := uploader.NewResumableUpload(newPatchRes.File.UploadURL, uploadDone, uploadErrs, uploader.ResumableUploadSettings{ Consumer: comm.NewStateConsumer(), }) 
patchWriter.MaxChunkGroup = *appArgs.maxChunkGroup if err != nil { return errors.Wrap(err, 1) } signatureWriter, err := uploader.NewResumableUpload(newSignatureRes.File.UploadURL, uploadDone, uploadErrs, uploader.ResumableUploadSettings{ Consumer: comm.NewStateConsumer(), }) signatureWriter.MaxChunkGroup = *appArgs.maxChunkGroup if err != nil { return errors.Wrap(err, 1) } comm.Debugf("Launching patch & signature channels") patchCounter := counter.NewWriter(patchWriter) signatureCounter := counter.NewWriter(signatureWriter) // we started walking the source container in the beginning, // we actually need it now. // note that we could actually start diffing before all the file // creation & upload setup is done var sourceContainer *tlc.Container var sourcePool wsync.Pool comm.Debugf("Waiting for source container") select { case walkErr := <-walkErrs: return errors.Wrap(walkErr, 1) case walkies := <-sourceContainerChan: comm.Debugf("Got sourceContainer!") sourceContainer = walkies.container sourcePool = walkies.pool break } comm.Opf("Pushing %s (%s)", humanize.IBytes(uint64(sourceContainer.Size)), sourceContainer.Stats()) comm.Debugf("Building diff context") var readBytes int64 bytesPerSec := float64(0) lastUploadedBytes := int64(0) stopTicking := make(chan struct{}) updateProgress := func() { uploadedBytes := int64(float64(patchWriter.UploadedBytes)) // input bytes that aren't in output, for example: // - bytes that have been compressed away // - bytes that were in old build and were simply reused goneBytes := readBytes - patchWriter.TotalBytes conservativeTotalBytes := sourceContainer.Size - goneBytes leftBytes := conservativeTotalBytes - uploadedBytes if leftBytes > AlmostThereThreshold { netStatus := "- network idle" if bytesPerSec > 1 { netStatus = fmt.Sprintf("@ %s/s", humanize.IBytes(uint64(bytesPerSec))) } comm.ProgressLabel(fmt.Sprintf("%s, %s left", netStatus, humanize.IBytes(uint64(leftBytes)))) } else { comm.ProgressLabel(fmt.Sprintf("- almost there")) } 
conservativeProgress := float64(uploadedBytes) / float64(conservativeTotalBytes) conservativeProgress = min(1.0, conservativeProgress) comm.Progress(conservativeProgress) comm.ProgressScale(float64(readBytes) / float64(sourceContainer.Size)) } go func() { ticker := time.NewTicker(time.Second * time.Duration(2)) for { select { case <-ticker.C: bytesPerSec = float64(patchWriter.UploadedBytes-lastUploadedBytes) / 2.0 lastUploadedBytes = patchWriter.UploadedBytes updateProgress() case <-stopTicking: break } } }() patchWriter.OnProgress = updateProgress stateConsumer := &state.Consumer{ OnProgress: func(progress float64) { readBytes = int64(float64(sourceContainer.Size) * progress) updateProgress() }, } dctx := &pwr.DiffContext{ Compression: &pwr.CompressionSettings{ Algorithm: pwr.CompressionAlgorithm_BROTLI, Quality: 1, }, SourceContainer: sourceContainer, Pool: sourcePool, TargetContainer: targetSignature.Container, TargetSignature: targetSignature.Hashes, Consumer: stateConsumer, } comm.StartProgress() comm.ProgressScale(0.0) err = dctx.WritePatch(patchCounter, signatureCounter) if err != nil { return errors.Wrap(err, 1) } // close in a goroutine to avoid deadlocking doClose := func(c io.Closer, done chan bool, errs chan error) { closeErr := c.Close() if closeErr != nil { errs <- errors.Wrap(closeErr, 1) return } done <- true } go doClose(patchWriter, uploadDone, uploadErrs) go doClose(signatureWriter, uploadDone, uploadErrs) for c := 0; c < 4; c++ { select { case uploadErr := <-uploadErrs: return errors.Wrap(uploadErr, 1) case <-uploadDone: comm.Debugf("upload done") } } close(stopTicking) comm.ProgressLabel("finalizing build") finalDone := make(chan bool) finalErrs := make(chan error) doFinalize := func(fileID int64, fileSize int64, done chan bool, errs chan error) { _, err = client.FinalizeBuildFile(buildID, fileID, fileSize) if err != nil { errs <- errors.Wrap(err, 1) return } done <- true } go doFinalize(newPatchRes.File.ID, patchCounter.Count(), finalDone, 
finalErrs) go doFinalize(newSignatureRes.File.ID, signatureCounter.Count(), finalDone, finalErrs) for i := 0; i < 2; i++ { select { case err := <-finalErrs: return errors.Wrap(err, 1) case <-finalDone: } } comm.EndProgress() { prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count())) percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes) relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size) prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes)) savings := 100.0 - relToNew if dctx.ReusedBytes > 0 { comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize) } else { comm.Statf("Added %s fresh data", prettyFreshSize) } if savings > 0 && !math.IsNaN(savings) { comm.Statf("%s patch (%.2f%% savings)", prettyPatchSize, 100.0-relToNew) } else { comm.Statf("%s patch (no savings)", prettyPatchSize) } } comm.Opf("Build is now processing, should be up in a bit (see `butler status`)") comm.Logf("") return nil }
func doFetch(specStr string, outPath string) error { var err error err = os.MkdirAll(outPath, os.FileMode(0755)) if err != nil { return errors.Wrap(err, 1) } outFiles, err := ioutil.ReadDir(outPath) if err != nil { return errors.Wrap(err, 1) } if len(outFiles) > 0 { return fmt.Errorf("Destination directory %s exists and is not empty", outPath) } spec, err := itchio.ParseSpec(specStr) if err != nil { return errors.Wrap(err, 1) } err = spec.EnsureChannel() if err != nil { return errors.Wrap(err, 1) } client, err := authenticateViaOauth() if err != nil { return errors.Wrap(err, 1) } comm.Opf("Getting last build of channel %s", spec.Channel) channelResponse, err := client.GetChannel(spec.Target, spec.Channel) if err != nil { return errors.Wrap(err, 1) } if channelResponse.Channel.Head == nil { return fmt.Errorf("Channel %s doesn't have any builds yet", spec.Channel) } head := *channelResponse.Channel.Head var headArchive *itchio.BuildFileInfo for _, file := range head.Files { comm.Debugf("found file %v", file) if file.Type == itchio.BuildFileType_ARCHIVE && file.SubType == itchio.BuildFileSubType_DEFAULT && file.State == itchio.BuildFileState_UPLOADED { headArchive = file break } } if headArchive == nil { return fmt.Errorf("Channel %s's latest build is still processing", spec.Channel) } dlReader, err := client.DownloadBuildFile(head.ID, headArchive.ID) if err != nil { return errors.Wrap(err, 1) } tmpFile, err := ioutil.TempFile("", "butler-fetch") if err != nil { return errors.Wrap(err, 1) } defer func() { if cErr := os.Remove(tmpFile.Name()); err == nil && cErr != nil { err = cErr } }() comm.Opf("Downloading build %d", head.ID) archiveSize, err := io.Copy(tmpFile, dlReader) if err != nil { return errors.Wrap(err, 1) } _, err = tmpFile.Seek(0, os.SEEK_SET) if err != nil { return errors.Wrap(err, 1) } settings := archiver.ExtractSettings{ Consumer: comm.NewStateConsumer(), } comm.Opf("Extracting into %s", outPath) result, err := archiver.Extract(tmpFile, archiveSize, 
outPath, settings) if err != nil { return errors.Wrap(err, 1) } comm.Statf("Extracted %d dirs, %d files, %d links into %s", result.Dirs, result.Files, result.Symlinks, outPath) if err != nil { return errors.Wrap(err, 1) } return nil }
func doProbe(patch string) error { patchReader, err := eos.Open(patch) if err != nil { return err } defer patchReader.Close() stats, err := patchReader.Stat() if err != nil { return err } comm.Statf("patch: %s", humanize.IBytes(uint64(stats.Size()))) rctx := wire.NewReadContext(patchReader) err = rctx.ExpectMagic(pwr.PatchMagic) if err != nil { return err } header := &pwr.PatchHeader{} err = rctx.ReadMessage(header) if err != nil { return err } rctx, err = pwr.DecompressWire(rctx, header.Compression) if err != nil { return err } target := &tlc.Container{} err = rctx.ReadMessage(target) if err != nil { return err } source := &tlc.Container{} err = rctx.ReadMessage(source) if err != nil { return err } comm.Statf("target: %s in %s", humanize.IBytes(uint64(target.Size)), target.Stats()) comm.Statf("source: %s in %s", humanize.IBytes(uint64(target.Size)), source.Stats()) var patchStats []patchStat sh := &pwr.SyncHeader{} rop := &pwr.SyncOp{} for fileIndex, f := range source.Files { stat := patchStat{ fileIndex: int64(fileIndex), freshData: f.Size, } sh.Reset() err = rctx.ReadMessage(sh) if err != nil { return err } if sh.FileIndex != int64(fileIndex) { return fmt.Errorf("malformed patch: expected file %d, got %d", fileIndex, sh.FileIndex) } readingOps := true var pos int64 for readingOps { rop.Reset() err = rctx.ReadMessage(rop) if err != nil { return err } switch rop.Type { case pwr.SyncOp_BLOCK_RANGE: fixedSize := (rop.BlockSpan - 1) * pwr.BlockSize lastIndex := rop.BlockIndex + (rop.BlockSpan - 1) lastSize := pwr.ComputeBlockSize(f.Size, lastIndex) totalSize := (fixedSize + lastSize) stat.freshData -= totalSize pos += totalSize case pwr.SyncOp_DATA: totalSize := int64(len(rop.Data)) if *appArgs.verbose { comm.Debugf("%s fresh data at %s (%d-%d)", humanize.IBytes(uint64(totalSize)), humanize.IBytes(uint64(pos)), pos, pos+totalSize) } pos += totalSize case pwr.SyncOp_HEY_YOU_DID_IT: readingOps = false } } patchStats = append(patchStats, stat) } 
sort.Sort(byDecreasingFreshData(patchStats)) var totalFresh int64 for _, stat := range patchStats { totalFresh += stat.freshData } var eightyFresh = int64(0.8 * float64(totalFresh)) var printedFresh int64 comm.Opf("80%% of fresh data is in the following files:") for _, stat := range patchStats { f := source.Files[stat.fileIndex] comm.Logf("%s in %s (%.2f%% changed)", humanize.IBytes(uint64(stat.freshData)), f.Path, float64(stat.freshData)/float64(f.Size)*100.0) printedFresh += stat.freshData if printedFresh >= eightyFresh { break } } return nil }
func doCp(srcPath string, destPath string, resume bool) error { src, err := eos.Open(srcPath) if err != nil { return err } defer src.Close() dir := filepath.Dir(destPath) err = os.MkdirAll(dir, 0755) if err != nil { return err } flags := os.O_CREATE | os.O_WRONLY dest, err := os.OpenFile(destPath, flags, 0644) if err != nil { return err } defer dest.Close() stats, err := src.Stat() if err != nil { return err } totalBytes := int64(stats.Size()) startOffset := int64(0) if resume { startOffset, err = dest.Seek(0, os.SEEK_END) if err != nil { return err } if startOffset == 0 { comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes))) } else if startOffset > totalBytes { comm.Logf("Existing data too big (%s > %s), starting over", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes))) } else if startOffset == totalBytes { comm.Logf("All %s already there", humanize.IBytes(uint64(totalBytes))) return nil } comm.Logf("Resuming at %s / %s", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes))) _, err = src.Seek(startOffset, os.SEEK_SET) if err != nil { return err } } else { comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes))) } start := time.Now() comm.Progress(float64(startOffset) / float64(totalBytes)) comm.StartProgressWithTotalBytes(totalBytes) cw := counter.NewWriterCallback(func(count int64) { alpha := float64(startOffset+count) / float64(totalBytes) comm.Progress(alpha) }, dest) copiedBytes, err := io.Copy(cw, src) if err != nil { return err } comm.EndProgress() totalDuration := time.Since(start) prettyStartOffset := humanize.IBytes(uint64(startOffset)) prettySize := humanize.IBytes(uint64(copiedBytes)) perSecond := humanize.IBytes(uint64(float64(totalBytes-startOffset) / totalDuration.Seconds())) comm.Statf("%s + %s copied @ %s/s\n", prettyStartOffset, prettySize, perSecond) return nil }
func doCmdBsdiff(target string, source string, patch string, concurrency int, measureOverhead bool) error { targetReader, err := os.Open(target) if err != nil { return err } defer targetReader.Close() targetStats, err := targetReader.Stat() if err != nil { return err } sourceReader, err := os.Open(source) if err != nil { return err } defer sourceReader.Close() sourceStats, err := sourceReader.Stat() if err != nil { return err } comm.Opf("Diffing %s (%s) and %s (%s)...", target, humanize.IBytes(uint64(targetStats.Size())), source, humanize.IBytes(uint64(sourceStats.Size()))) patchWriter, err := os.Create(patch) if err != nil { return err } wctx := wire.NewWriteContext(patchWriter) err = wctx.WriteMagic(pwr.PatchMagic) if err != nil { return err } compression := butlerCompressionSettings() err = wctx.WriteMessage(&pwr.PatchHeader{ Compression: &compression, }) if err != nil { return err } wctx, err = pwr.CompressWire(wctx, &compression) if err != nil { return err } targetContainer := &tlc.Container{} targetContainer.Files = []*tlc.File{ &tlc.File{ Path: target, Size: targetStats.Size(), }, } err = wctx.WriteMessage(targetContainer) if err != nil { return err } sourceContainer := &tlc.Container{} sourceContainer.Files = []*tlc.File{ &tlc.File{ Path: source, Size: sourceStats.Size(), }, } err = wctx.WriteMessage(sourceContainer) if err != nil { return err } err = wctx.WriteMessage(&pwr.SyncHeader{ FileIndex: 0, }) if err != nil { return err } err = wctx.WriteMessage(&pwr.SyncOp{ Type: pwr.SyncOp_BSDIFF, FileIndex: 0, }) if err != nil { return err } startTime := time.Now() comm.StartProgress() dc := bsdiff.DiffContext{ MeasureMem: *appArgs.memstats, MeasureParallelOverhead: measureOverhead, SuffixSortConcurrency: concurrency, } err = dc.Do(targetReader, sourceReader, wctx.WriteMessage, comm.NewStateConsumer()) if err != nil { return err } comm.EndProgress() err = wctx.WriteMessage(&pwr.SyncOp{ Type: pwr.SyncOp_HEY_YOU_DID_IT, }) if err != nil { return err } err = 
wctx.Close() if err != nil { return err } patchStats, err := os.Lstat(patch) if err != nil { return err } duration := time.Since(startTime) perSec := float64(sourceStats.Size()) / duration.Seconds() relToNew := 100.0 * float64(patchStats.Size()) / float64(sourceStats.Size()) comm.Statf("Processed %s @ %s / s, total %s", humanize.IBytes(uint64(sourceStats.Size())), humanize.IBytes(uint64(perSec)), duration) comm.Statf("Wrote %s patch (%.2f%% of total size) to %s", humanize.IBytes(uint64(patchStats.Size())), relToNew, patch) return nil }
// doApply applies the patch at `patch` to `target`, writing the result to
// `output` (or patching `target` in place when output is empty/identical and
// --inplace is set). With signaturePath set, the result is validated against
// that signature; wounds can be recorded to woundsPath.
func doApply(patch string, target string, output string, inplace bool, signaturePath string, woundsPath string) error {
	// Empty output means "patch the target itself".
	if output == "" {
		output = target
	}
	target = path.Clean(target)
	output = path.Clean(output)
	// Destructive (same-path) patching requires explicit opt-in.
	if output == target {
		if !inplace {
			comm.Dief("Refusing to destructively patch %s without --inplace", output)
		}
	}
	if signaturePath == "" {
		comm.Opf("Patching %s", output)
	} else {
		comm.Opf("Patching %s with validation", output)
	}

	startTime := time.Now()

	patchReader, err := eos.Open(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	// NOTE(review): patchReader is never closed here — presumably harmless for
	// a one-shot CLI invocation, but worth confirming.

	// Optionally load the signature used to validate the patched result.
	var signature *pwr.SignatureInfo
	if signaturePath != "" {
		sigReader, sigErr := eos.Open(signaturePath)
		if sigErr != nil {
			return errors.Wrap(sigErr, 1)
		}
		defer sigReader.Close()

		signature, sigErr = pwr.ReadSignature(sigReader)
		if sigErr != nil {
			return errors.Wrap(sigErr, 1)
		}
	}

	actx := &pwr.ApplyContext{
		TargetPath: target,
		OutputPath: output,
		InPlace:    inplace,
		Signature:  signature,
		WoundsPath: woundsPath,

		Consumer: comm.NewStateConsumer(),
	}

	comm.StartProgress()
	err = actx.ApplyPatch(patchReader)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	container := actx.SourceContainer
	prettySize := humanize.IBytes(uint64(container.Size))
	perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds()))

	// In-place mode has extra stats: how many files were touched/moved/deleted
	// and how big the staging area was.
	if actx.InPlace {
		statStr := ""
		if actx.Stats.TouchedFiles > 0 {
			statStr += fmt.Sprintf("patched %d, ", actx.Stats.TouchedFiles)
		}
		if actx.Stats.MovedFiles > 0 {
			statStr += fmt.Sprintf("renamed %d, ", actx.Stats.MovedFiles)
		}
		if actx.Stats.DeletedFiles > 0 {
			statStr += fmt.Sprintf("deleted %d, ", actx.Stats.DeletedFiles)
		}
		comm.Statf("%s (%s stage)", statStr, humanize.IBytes(uint64(actx.Stats.StageSize)))
	}
	comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond)

	// Report any corruption detected during validation.
	if actx.WoundsConsumer != nil && actx.WoundsConsumer.HasWounds() {
		extra := ""
		if actx.WoundsPath != "" {
			extra = fmt.Sprintf(" (written to %s)", actx.WoundsPath)
		}

		totalCorrupted := actx.WoundsConsumer.TotalCorrupted()
		comm.Logf("Result has wounds, %s corrupted data%s", humanize.IBytes(uint64(totalCorrupted)), extra)
	}
	return nil
}
// doDiff computes a patch from `target` to `source` and writes it to `patch`,
// along with the source's signature to `patch + ".sig"`. The target may be a
// container (directory, archive, ...) or an existing signature file.
func doDiff(target string, source string, patch string, compression pwr.CompressionSettings) error {
	var err error

	startTime := time.Now()

	targetSignature := &pwr.SignatureInfo{}
	// First try to walk the target as a container; if that fails, fall back to
	// interpreting it as a signature file.
	targetSignature.Container, err = tlc.WalkAny(target, filterPaths)
	if err != nil {
		// Signature file perhaps?
		var signatureReader io.ReadCloser
		signatureReader, err = eos.Open(target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			// ErrFormat means it's neither a container nor a signature.
			if errors.Is(err, wire.ErrFormat) {
				return fmt.Errorf("unrecognized target %s (not a container, not a signature file)", target)
			}
			return errors.Wrap(err, 1)
		}

		comm.Opf("Read signature from %s", target)

		err = signatureReader.Close()
		if err != nil {
			return errors.Wrap(err, 1)
		}
	} else {
		// Container (dir, archive, etc.) — hash it to build the signature.
		comm.Opf("Hashing %s", target)

		comm.StartProgress()
		var targetPool wsync.Pool
		targetPool, err = pools.New(targetSignature.Container, target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature.Hashes, err = pwr.ComputeSignature(targetSignature.Container, targetPool, comm.NewStateConsumer())
		comm.EndProgress()
		if err != nil {
			return errors.Wrap(err, 1)
		}

		{
			prettySize := humanize.IBytes(uint64(targetSignature.Container.Size))
			perSecond := humanize.IBytes(uint64(float64(targetSignature.Container.Size) / time.Since(startTime).Seconds()))
			comm.Statf("%s (%s) @ %s/s\n", prettySize, targetSignature.Container.Stats(), perSecond)
		}
	}

	// Restart the clock: the diff phase is timed separately from hashing.
	startTime = time.Now()

	var sourceContainer *tlc.Container
	sourceContainer, err = tlc.WalkAny(source, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	var sourcePool wsync.Pool
	sourcePool, err = pools.New(sourceContainer, source)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	patchWriter, err := os.Create(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer patchWriter.Close()

	// The signature of the new build lands next to the patch.
	signaturePath := patch + ".sig"
	signatureWriter, err := os.Create(signaturePath)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	// Counters let us report the written patch size afterwards.
	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	dctx := &pwr.DiffContext{
		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer:    comm.NewStateConsumer(),
		Compression: &compression,
	}

	comm.Opf("Diffing %s", source)
	comm.StartProgress()
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	totalDuration := time.Since(startTime)
	{
		prettySize := humanize.IBytes(uint64(sourceContainer.Size))
		perSecond := humanize.IBytes(uint64(float64(sourceContainer.Size) / totalDuration.Seconds()))
		comm.Statf("%s (%s) @ %s/s\n", prettySize, sourceContainer.Stats(), perSecond)
	}

	// With --verify, immediately apply the fresh patch into a temp dir against
	// the just-written signature as a sanity check.
	if *diffArgs.verify {
		tmpDir, err := ioutil.TempDir("", "pwr")
		if err != nil {
			return errors.Wrap(err, 1)
		}
		defer os.RemoveAll(tmpDir)

		apply(patch, target, tmpDir, false, signaturePath, "")
	}

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))

		comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		comm.Statf("%s patch (%.2f%% of the full size) in %s", prettyPatchSize, relToNew, totalDuration)
	}

	return nil
}
func applyUpgrade(before string, after string) error { execPath, err := osext.Executable() if err != nil { return err } oldPath := execPath + ".old" newPath := execPath + ".new" gzPath := newPath + ".gz" err = os.RemoveAll(newPath) if err != nil { return err } err = os.RemoveAll(gzPath) if err != nil { return err } ext := "" if runtime.GOOS == "windows" { ext = ".exe" } fragment := fmt.Sprintf("v%s", after) if after == "head" { fragment = "head" } execURL := fmt.Sprintf("%s/%s/butler%s", updateBaseURL, fragment, ext) gzURL := fmt.Sprintf("%s/%s/butler.gz", updateBaseURL, fragment) comm.Opf("%s", gzURL) err = func() error { _, gErr := tryDl(gzURL, gzPath) if gErr != nil { return gErr } fr, gErr := os.Open(gzPath) if gErr != nil { return gErr } defer fr.Close() gr, gErr := gzip.NewReader(fr) if gErr != nil { return gErr } fw, gErr := os.Create(newPath) if gErr != nil { return gErr } defer fw.Close() _, gErr = io.Copy(fw, gr) if gErr != nil { return gErr } return nil }() if err != nil { comm.Opf("Falling back to %s", execURL) _, err = tryDl(execURL, newPath) must(err) } err = os.Chmod(newPath, os.FileMode(0755)) if err != nil { return err } comm.Opf("Backing up current version to %s just in case...", oldPath) err = os.Rename(execPath, oldPath) if err != nil { return err } err = os.Rename(newPath, execPath) if err != nil { return err } err = os.Remove(oldPath) if err != nil { if os.IsPermission(err) && runtime.GOOS == "windows" { // poor windows doesn't like us removing executables from under it // I vote we move on and let butler.exe.old hang around. } else { return err } } comm.Statf("Upgraded butler from %s to %s. Have a nice day!", before, after) return nil }