Example #1
func appendAllToFile(src io.Reader, dest string, existingBytes int64, totalBytes int64) error {
	out, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer out.Close()

	prevPercent := 0.0
	comm.StartProgress()

	onWrite := func(bytesDownloaded int64) {
		bytesWritten := existingBytes + bytesDownloaded
		percent := float64(bytesWritten) / float64(totalBytes)
		if math.Abs(percent-prevPercent) < 0.0001 {
			return
		}

		prevPercent = percent
		comm.Progress(percent)
	}
	counter := counter.NewWriterCallback(onWrite, out)

	_, err := io.Copy(counter, src)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.EndProgress()
	return nil
}
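
A minimal caller sketch (hypothetical, not from the original source): it assumes it sits in the same package as appendAllToFile above and only adds standard-library calls, deriving existingBytes from whatever is already on disk.

func resumeAppend(src io.Reader, dest string, totalBytes int64) error {
	existingBytes := int64(0)
	if stats, statErr := os.Stat(dest); statErr == nil {
		// part of the file is already there; report progress relative to it
		existingBytes = stats.Size()
	}
	return appendAllToFile(src, dest, existingBytes, totalBytes)
}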
Example #2
func doSign(output string, signature string, compression pwr.CompressionSettings, fixPerms bool) error {
	comm.Opf("Creating signature for %s", output)
	startTime := time.Now()

	container, err := tlc.WalkAny(output, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	pool, err := pools.New(container, output)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	if fixPerms {
		container.FixPermissions(pool)
	}

	signatureWriter, err := os.Create(signature)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	rawSigWire := wire.NewWriteContext(signatureWriter)
	err = rawSigWire.WriteMagic(pwr.SignatureMagic)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = rawSigWire.WriteMessage(&pwr.SignatureHeader{
		Compression: &compression,
	})
	if err != nil {
		return errors.Wrap(err, 1)
	}

	sigWire, err := pwr.CompressWire(rawSigWire, &compression)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = sigWire.WriteMessage(container)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.StartProgress()
	err = pwr.ComputeSignatureToWriter(container, pool, comm.NewStateConsumer(), func(hash wsync.BlockHash) error {
		return sigWire.WriteMessage(&pwr.BlockHash{
			WeakHash:   hash.WeakHash,
			StrongHash: hash.StrongHash,
		})
	})
	comm.EndProgress()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = sigWire.Close()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	prettySize := humanize.IBytes(uint64(container.Size))
	perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds()))
	comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond)

	return nil
}
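
A hypothetical invocation sketch; the CompressionSettings literal mirrors the one used in Example #6 (Brotli, quality 1), and writing the signature to a ".sig" file next to the build is just an illustrative choice.

func signBuild(buildDir string) error {
	compression := pwr.CompressionSettings{
		Algorithm: pwr.CompressionAlgorithm_BROTLI,
		Quality:   1,
	}
	// write the signature next to the directory being signed
	return doSign(buildDir, buildDir+".sig", compression, false)
}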
Example #3
func doVerify(signaturePath string, dir string, woundsPath string, healPath string) error {
	if woundsPath == "" {
		if healPath == "" {
			comm.Opf("Verifying %s", dir)
		} else {
			comm.Opf("Verifying %s, healing as we go", dir)
		}
	} else {
		if healPath == "" {
			comm.Opf("Verifying %s, writing wounds to %s", dir, woundsPath)
		} else {
			comm.Dief("Options --wounds and --heal cannot be used at the same time")
		}
	}
	startTime := time.Now()

	signatureReader, err := eos.Open(signaturePath)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureReader.Close()

	signature, err := pwr.ReadSignature(signatureReader)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	vc := &pwr.ValidatorContext{
		Consumer:   comm.NewStateConsumer(),
		WoundsPath: woundsPath,
		HealPath:   healPath,
	}

	comm.StartProgressWithTotalBytes(signature.Container.Size)

	err = vc.Validate(dir, signature)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.EndProgress()

	prettySize := humanize.IBytes(uint64(signature.Container.Size))
	perSecond := humanize.IBytes(uint64(float64(signature.Container.Size) / time.Since(startTime).Seconds()))
	comm.Statf("%s (%s) @ %s/s\n", prettySize, signature.Container.Stats(), perSecond)

	if vc.WoundsConsumer.HasWounds() {
		if healer, ok := vc.WoundsConsumer.(pwr.Healer); ok {
			comm.Statf("%s corrupted data found, %s healed", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted())), humanize.IBytes(uint64(healer.TotalHealed())))
		} else {
			comm.Dief("%s corrupted data found", humanize.IBytes(uint64(vc.WoundsConsumer.TotalCorrupted())))
		}
	}

	return nil
}
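
The three verification modes above differ only in which of woundsPath/healPath is non-empty; a hypothetical set of thin wrappers (not part of the original) makes that explicit.

func verifyOnly(signaturePath string, dir string) error {
	return doVerify(signaturePath, dir, "", "")
}

func verifyRecordingWounds(signaturePath string, dir string, woundsPath string) error {
	return doVerify(signaturePath, dir, woundsPath, "")
}

func verifyAndHeal(signaturePath string, dir string, healPath string) error {
	return doVerify(signaturePath, dir, "", healPath)
}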
Example #4
func untar(file string, dir string) {
	settings := archiver.ExtractSettings{
		Consumer: comm.NewStateConsumer(),
	}

	comm.StartProgress()
	res, err := archiver.ExtractTar(file, dir, settings)
	comm.EndProgress()

	must(err)
	comm.Logf("Extracted %d dirs, %d files, %d symlinks", res.Dirs, res.Files, res.Symlinks)
}
Example #5
func unzip(file string, dir string, resumeFile string) {
	comm.Opf("Extracting zip %s to %s", file, dir)

	settings := archiver.ExtractSettings{
		Consumer:   comm.NewStateConsumer(),
		ResumeFrom: resumeFile,
		OnUncompressedSizeKnown: func(uncompressedSize int64) {
			comm.StartProgressWithTotalBytes(uncompressedSize)
		},
	}

	res, err := archiver.ExtractPath(file, dir, settings)
	comm.EndProgress()

	must(err)
	comm.Logf("Extracted %d dirs, %d files, %d symlinks", res.Dirs, res.Files, res.Symlinks)
}
Example #6
func doPush(buildPath string, specStr string, userVersion string, fixPerms bool) error {
	// start walking source container while waiting on auth flow
	sourceContainerChan := make(chan walkResult)
	walkErrs := make(chan error)
	go doWalk(buildPath, sourceContainerChan, walkErrs, fixPerms)

	spec, err := itchio.ParseSpec(specStr)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = spec.EnsureChannel()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	client, err := authenticateViaOauth()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	newBuildRes, err := client.CreateBuild(spec.Target, spec.Channel, userVersion)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	buildID := newBuildRes.Build.ID
	parentID := newBuildRes.Build.ParentBuild.ID

	var targetSignature *pwr.SignatureInfo

	if parentID == 0 {
		comm.Opf("For channel `%s`: pushing first build", spec.Channel)
		targetSignature = &pwr.SignatureInfo{
			Container: &tlc.Container{},
			Hashes:    make([]wsync.BlockHash, 0),
		}
	} else {
		comm.Opf("For channel `%s`: last build is %d, downloading its signature", spec.Channel, parentID)
		var buildFiles itchio.ListBuildFilesResponse
		buildFiles, err = client.ListBuildFiles(parentID)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		signatureFile := itchio.FindBuildFile(itchio.BuildFileType_SIGNATURE, buildFiles.Files)
		if signatureFile == nil {
			comm.Dief("Could not find signature for parent build %d, aborting", parentID)
		}

		var signatureReader io.Reader
		signatureReader, err = client.DownloadBuildFile(parentID, signatureFile.ID)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	newPatchRes, newSignatureRes, err := createBothFiles(client, buildID)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	uploadDone := make(chan bool)
	uploadErrs := make(chan error)

	patchWriter, err := uploader.NewResumableUpload(newPatchRes.File.UploadURL,
		uploadDone, uploadErrs, uploader.ResumableUploadSettings{
			Consumer: comm.NewStateConsumer(),
		})
	if err != nil {
		return errors.Wrap(err, 1)
	}
	patchWriter.MaxChunkGroup = *appArgs.maxChunkGroup

	signatureWriter, err := uploader.NewResumableUpload(newSignatureRes.File.UploadURL,
		uploadDone, uploadErrs, uploader.ResumableUploadSettings{
			Consumer: comm.NewStateConsumer(),
		})
	if err != nil {
		return errors.Wrap(err, 1)
	}
	signatureWriter.MaxChunkGroup = *appArgs.maxChunkGroup

	comm.Debugf("Launching patch & signature channels")

	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	// we started walking the source container in the beginning,
	// we actually need it now.
	// note that we could actually start diffing before all the file
	// creation & upload setup is done

	var sourceContainer *tlc.Container
	var sourcePool wsync.Pool

	comm.Debugf("Waiting for source container")
	select {
	case walkErr := <-walkErrs:
		return errors.Wrap(walkErr, 1)
	case walkies := <-sourceContainerChan:
		comm.Debugf("Got sourceContainer!")
		sourceContainer = walkies.container
		sourcePool = walkies.pool
		break
	}

	comm.Opf("Pushing %s (%s)", humanize.IBytes(uint64(sourceContainer.Size)), sourceContainer.Stats())

	comm.Debugf("Building diff context")
	var readBytes int64

	bytesPerSec := float64(0)
	lastUploadedBytes := int64(0)
	stopTicking := make(chan struct{})

	updateProgress := func() {
		uploadedBytes := patchWriter.UploadedBytes

		// input bytes that aren't in output, for example:
		//  - bytes that have been compressed away
		//  - bytes that were in old build and were simply reused
		goneBytes := readBytes - patchWriter.TotalBytes

		conservativeTotalBytes := sourceContainer.Size - goneBytes

		leftBytes := conservativeTotalBytes - uploadedBytes
		if leftBytes > AlmostThereThreshold {
			netStatus := "- network idle"
			if bytesPerSec > 1 {
				netStatus = fmt.Sprintf("@ %s/s", humanize.IBytes(uint64(bytesPerSec)))
			}
			comm.ProgressLabel(fmt.Sprintf("%s, %s left", netStatus, humanize.IBytes(uint64(leftBytes))))
		} else {
			comm.ProgressLabel("- almost there")
		}

		conservativeProgress := float64(uploadedBytes) / float64(conservativeTotalBytes)
		conservativeProgress = min(1.0, conservativeProgress)
		comm.Progress(conservativeProgress)

		comm.ProgressScale(float64(readBytes) / float64(sourceContainer.Size))
	}

	go func() {
		ticker := time.NewTicker(2 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				bytesPerSec = float64(patchWriter.UploadedBytes-lastUploadedBytes) / 2.0
				lastUploadedBytes = patchWriter.UploadedBytes
				updateProgress()
			case <-stopTicking:
				// a bare `break` would only exit the select, not the loop
				return
			}
		}
	}()

	patchWriter.OnProgress = updateProgress

	stateConsumer := &state.Consumer{
		OnProgress: func(progress float64) {
			readBytes = int64(float64(sourceContainer.Size) * progress)
			updateProgress()
		},
	}

	dctx := &pwr.DiffContext{
		Compression: &pwr.CompressionSettings{
			Algorithm: pwr.CompressionAlgorithm_BROTLI,
			Quality:   1,
		},

		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer: stateConsumer,
	}

	comm.StartProgress()
	comm.ProgressScale(0.0)
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	// close in a goroutine to avoid deadlocking
	doClose := func(c io.Closer, done chan bool, errs chan error) {
		closeErr := c.Close()
		if closeErr != nil {
			errs <- errors.Wrap(closeErr, 1)
			return
		}

		done <- true
	}

	go doClose(patchWriter, uploadDone, uploadErrs)
	go doClose(signatureWriter, uploadDone, uploadErrs)

	for c := 0; c < 4; c++ {
		select {
		case uploadErr := <-uploadErrs:
			return errors.Wrap(uploadErr, 1)
		case <-uploadDone:
			comm.Debugf("upload done")
		}
	}

	close(stopTicking)
	comm.ProgressLabel("finalizing build")

	finalDone := make(chan bool)
	finalErrs := make(chan error)

	doFinalize := func(fileID int64, fileSize int64, done chan bool, errs chan error) {
		_, err = client.FinalizeBuildFile(buildID, fileID, fileSize)
		if err != nil {
			errs <- errors.Wrap(err, 1)
			return
		}

		done <- true
	}

	go doFinalize(newPatchRes.File.ID, patchCounter.Count(), finalDone, finalErrs)
	go doFinalize(newSignatureRes.File.ID, signatureCounter.Count(), finalDone, finalErrs)

	for i := 0; i < 2; i++ {
		select {
		case err := <-finalErrs:
			return errors.Wrap(err, 1)
		case <-finalDone:
		}
	}

	comm.EndProgress()

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))
		savings := 100.0 - relToNew

		if dctx.ReusedBytes > 0 {
			comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		} else {
			comm.Statf("Added %s fresh data", prettyFreshSize)
		}

		if savings > 0 && !math.IsNaN(savings) {
			comm.Statf("%s patch (%.2f%% savings)", prettyPatchSize, 100.0-relToNew)
		} else {
			comm.Statf("%s patch (no savings)", prettyPatchSize)
		}
	}
	comm.Opf("Build is now processing, should be up in a bit (see `butler status`)")
	comm.Logf("")

	return nil
}
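
A toy walkthrough of the "conservative" progress arithmetic used in updateProgress above; the byte counts are made up for illustration.

func exampleConservativeProgress() {
	sourceSize := int64(100 << 20) // 100 MiB of input in the source container
	readBytes := int64(60 << 20)   // 60 MiB of input diffed so far
	patchBytes := int64(20 << 20)  // 20 MiB of patch data produced from it
	uploaded := int64(15 << 20)    // 15 MiB of that already uploaded

	// input bytes that produced no output (compressed away or reused)
	goneBytes := readBytes - patchBytes
	// best current guess at the final upload size
	conservativeTotal := sourceSize - goneBytes

	progress := float64(uploaded) / float64(conservativeTotal)
	fmt.Printf("progress ~ %.2f\n", progress) // 15 MiB / 60 MiB = 0.25
}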
Example #7
func doCp(srcPath string, destPath string, resume bool) error {
	src, err := eos.Open(srcPath)
	if err != nil {
		return err
	}

	defer src.Close()

	dir := filepath.Dir(destPath)
	err = os.MkdirAll(dir, 0755)
	if err != nil {
		return err
	}

	flags := os.O_CREATE | os.O_WRONLY

	dest, err := os.OpenFile(destPath, flags, 0644)
	if err != nil {
		return err
	}

	defer dest.Close()

	stats, err := src.Stat()
	if err != nil {
		return err
	}

	totalBytes := int64(stats.Size())
	startOffset := int64(0)

	if resume {
		startOffset, err = dest.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}

		if startOffset == 0 {
			comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
		} else if startOffset > totalBytes {
			comm.Logf("Existing data too big (%s > %s), starting over", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))
			// discard the oversized existing data and start from scratch
			startOffset = 0
			err = dest.Truncate(0)
			if err != nil {
				return err
			}
			_, err = dest.Seek(0, io.SeekStart)
			if err != nil {
				return err
			}
		} else if startOffset == totalBytes {
			comm.Logf("All %s already there", humanize.IBytes(uint64(totalBytes)))
			return nil
		} else {
			comm.Logf("Resuming at %s / %s", humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))
		}

		_, err = src.Seek(startOffset, io.SeekStart)
		if err != nil {
			return err
		}
	} else {
		comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
	}

	start := time.Now()

	comm.Progress(float64(startOffset) / float64(totalBytes))
	comm.StartProgressWithTotalBytes(totalBytes)

	cw := counter.NewWriterCallback(func(count int64) {
		alpha := float64(startOffset+count) / float64(totalBytes)
		comm.Progress(alpha)
	}, dest)

	copiedBytes, err := io.Copy(cw, src)
	if err != nil {
		return err
	}
	comm.EndProgress()

	totalDuration := time.Since(start)
	prettyStartOffset := humanize.IBytes(uint64(startOffset))
	prettySize := humanize.IBytes(uint64(copiedBytes))
	perSecond := humanize.IBytes(uint64(float64(totalBytes-startOffset) / totalDuration.Seconds()))
	comm.Statf("%s + %s copied @ %s/s\n", prettyStartOffset, prettySize, perSecond)

	return nil
}
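
A hypothetical retry wrapper built on the resume support above (not part of the original): the first attempt starts fresh, and any retry picks up from whatever already landed in destPath.

func cpWithRetries(srcPath string, destPath string, maxAttempts int) error {
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		// resume on every attempt after the first one
		err = doCp(srcPath, destPath, attempt > 0)
		if err == nil {
			return nil
		}
		comm.Logf("copy failed (%s), retrying", err.Error())
	}
	return err
}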
Example #8
func doHeal(dir string, woundsPath string, spec string) error {
	reader, err := os.Open(woundsPath)
	if err != nil {
		return err
	}
	defer reader.Close()

	healer, err := pwr.NewHealer(spec, dir)
	if err != nil {
		return err
	}

	consumer := comm.NewStateConsumer()

	healer.SetConsumer(consumer)

	rctx := wire.NewReadContext(reader)

	err = rctx.ExpectMagic(pwr.WoundsMagic)
	if err != nil {
		return err
	}

	wh := &pwr.WoundsHeader{}
	err = rctx.ReadMessage(wh)
	if err != nil {
		return err
	}

	container := &tlc.Container{}
	err = rctx.ReadMessage(container)
	if err != nil {
		return err
	}

	wounds := make(chan *pwr.Wound)
	errs := make(chan error)

	comm.StartProgress()

	go func() {
		errs <- healer.Do(container, wounds)
	}()

	wound := &pwr.Wound{}
	for {
		wound.Reset()
		err = rctx.ReadMessage(wound)
		if err != nil {
			if err == io.EOF {
				// all wounds have been read
				break
			}
			return err
		}

		select {
		case wounds <- wound:
			// wound handed off to the healer
		case healErr := <-errs:
			return healErr
		}
	}

	close(wounds)
	healErr := <-errs

	comm.EndProgress()

	if healErr != nil {
		return healErr
	}

	comm.Opf("All healed!")
	return nil
}
Example #9
func doCmdBsdiff(target string, source string, patch string, concurrency int, measureOverhead bool) error {
	targetReader, err := os.Open(target)
	if err != nil {
		return err
	}

	defer targetReader.Close()

	targetStats, err := targetReader.Stat()
	if err != nil {
		return err
	}

	sourceReader, err := os.Open(source)
	if err != nil {
		return err
	}

	defer sourceReader.Close()

	sourceStats, err := sourceReader.Stat()
	if err != nil {
		return err
	}

	comm.Opf("Diffing %s (%s) and %s (%s)...", target, humanize.IBytes(uint64(targetStats.Size())), source,
		humanize.IBytes(uint64(sourceStats.Size())))

	patchWriter, err := os.Create(patch)
	if err != nil {
		return err
	}
	defer patchWriter.Close()

	wctx := wire.NewWriteContext(patchWriter)

	err = wctx.WriteMagic(pwr.PatchMagic)
	if err != nil {
		return err
	}

	compression := butlerCompressionSettings()

	err = wctx.WriteMessage(&pwr.PatchHeader{
		Compression: &compression,
	})
	if err != nil {
		return err
	}

	wctx, err = pwr.CompressWire(wctx, &compression)
	if err != nil {
		return err
	}

	targetContainer := &tlc.Container{}
	targetContainer.Files = []*tlc.File{
		&tlc.File{
			Path: target,
			Size: targetStats.Size(),
		},
	}

	err = wctx.WriteMessage(targetContainer)
	if err != nil {
		return err
	}

	sourceContainer := &tlc.Container{}
	sourceContainer.Files = []*tlc.File{
		&tlc.File{
			Path: source,
			Size: sourceStats.Size(),
		},
	}

	err = wctx.WriteMessage(sourceContainer)
	if err != nil {
		return err
	}

	err = wctx.WriteMessage(&pwr.SyncHeader{
		FileIndex: 0,
	})
	if err != nil {
		return err
	}

	err = wctx.WriteMessage(&pwr.SyncOp{
		Type:      pwr.SyncOp_BSDIFF,
		FileIndex: 0,
	})
	if err != nil {
		return err
	}

	startTime := time.Now()

	comm.StartProgress()

	dc := bsdiff.DiffContext{
		MeasureMem:              *appArgs.memstats,
		MeasureParallelOverhead: measureOverhead,
		SuffixSortConcurrency:   concurrency,
	}

	err = dc.Do(targetReader, sourceReader, wctx.WriteMessage, comm.NewStateConsumer())
	if err != nil {
		return err
	}

	comm.EndProgress()

	err = wctx.WriteMessage(&pwr.SyncOp{
		Type: pwr.SyncOp_HEY_YOU_DID_IT,
	})
	if err != nil {
		return err
	}

	err = wctx.Close()
	if err != nil {
		return err
	}

	patchStats, err := os.Lstat(patch)
	if err != nil {
		return err
	}

	duration := time.Since(startTime)
	perSec := float64(sourceStats.Size()) / duration.Seconds()

	relToNew := 100.0 * float64(patchStats.Size()) / float64(sourceStats.Size())

	comm.Statf("Processed %s @ %s / s, total %s", humanize.IBytes(uint64(sourceStats.Size())), humanize.IBytes(uint64(perSec)), duration)
	comm.Statf("Wrote %s patch (%.2f%% of total size) to %s", humanize.IBytes(uint64(patchStats.Size())), relToNew, patch)

	return nil
}
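
A small reading-side sketch (hypothetical helper, not part of the original), using only the wire/pwr calls already shown in Examples #8 and #9, to check that a file produced by doCmdBsdiff starts with the expected magic and header.

func readPatchHeader(patchPath string) (*pwr.PatchHeader, error) {
	f, err := os.Open(patchPath)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	rctx := wire.NewReadContext(f)

	err = rctx.ExpectMagic(pwr.PatchMagic)
	if err != nil {
		return nil, err
	}

	header := &pwr.PatchHeader{}
	err = rctx.ReadMessage(header)
	if err != nil {
		return nil, err
	}

	return header, nil
}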
Example #10
func doApply(patch string, target string, output string, inplace bool, signaturePath string, woundsPath string) error {
	if output == "" {
		output = target
	}

	target = filepath.Clean(target)
	output = filepath.Clean(output)
	if output == target {
		if !inplace {
			comm.Dief("Refusing to destructively patch %s without --inplace", output)
		}
	}

	if signaturePath == "" {
		comm.Opf("Patching %s", output)
	} else {
		comm.Opf("Patching %s with validation", output)
	}

	startTime := time.Now()

	patchReader, err := eos.Open(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer patchReader.Close()

	var signature *pwr.SignatureInfo
	if signaturePath != "" {
		sigReader, sigErr := eos.Open(signaturePath)
		if sigErr != nil {
			return errors.Wrap(sigErr, 1)
		}
		defer sigReader.Close()

		signature, sigErr = pwr.ReadSignature(sigReader)
		if sigErr != nil {
			return errors.Wrap(sigErr, 1)
		}
	}

	actx := &pwr.ApplyContext{
		TargetPath: target,
		OutputPath: output,
		InPlace:    inplace,
		Signature:  signature,
		WoundsPath: woundsPath,

		Consumer: comm.NewStateConsumer(),
	}

	comm.StartProgress()
	err = actx.ApplyPatch(patchReader)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	container := actx.SourceContainer
	prettySize := humanize.IBytes(uint64(container.Size))
	perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds()))

	if actx.InPlace {
		statStr := ""
		if actx.Stats.TouchedFiles > 0 {
			statStr += fmt.Sprintf("patched %d, ", actx.Stats.TouchedFiles)
		}
		if actx.Stats.MovedFiles > 0 {
			statStr += fmt.Sprintf("renamed %d, ", actx.Stats.MovedFiles)
		}
		if actx.Stats.DeletedFiles > 0 {
			statStr += fmt.Sprintf("deleted %d, ", actx.Stats.DeletedFiles)
		}

		comm.Statf("%s (%s stage)", statStr, humanize.IBytes(uint64(actx.Stats.StageSize)))
	}
	comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond)

	if actx.WoundsConsumer != nil && actx.WoundsConsumer.HasWounds() {
		extra := ""
		if actx.WoundsPath != "" {
			extra = fmt.Sprintf(" (written to %s)", actx.WoundsPath)
		}

		totalCorrupted := actx.WoundsConsumer.TotalCorrupted()
		comm.Logf("Result has wounds, %s corrupted data%s", humanize.IBytes(uint64(totalCorrupted)), extra)
	}

	return nil
}
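
Two hypothetical convenience wrappers over doApply, mirroring the flag combinations handled above: a fresh apply into a separate output directory, and an in-place apply with signature validation.

func applyToNewDir(patch string, target string, output string) error {
	return doApply(patch, target, output, false, "", "")
}

func applyInPlaceValidated(patch string, dir string, signaturePath string) error {
	return doApply(patch, dir, dir, true, signaturePath, "")
}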
Example #11
func doDiff(target string, source string, patch string, compression pwr.CompressionSettings) error {
	var err error

	startTime := time.Now()

	targetSignature := &pwr.SignatureInfo{}

	targetSignature.Container, err = tlc.WalkAny(target, filterPaths)
	if err != nil {
		// Signature file perhaps?
		var signatureReader io.ReadCloser

		signatureReader, err = eos.Open(target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			if errors.Is(err, wire.ErrFormat) {
				return fmt.Errorf("unrecognized target %s (not a container, not a signature file)", target)
			}
			return errors.Wrap(err, 1)
		}

		comm.Opf("Read signature from %s", target)

		err = signatureReader.Close()
		if err != nil {
			return errors.Wrap(err, 1)
		}
	} else {
		// Container (dir, archive, etc.)
		comm.Opf("Hashing %s", target)

		comm.StartProgress()
		var targetPool wsync.Pool
		targetPool, err = pools.New(targetSignature.Container, target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature.Hashes, err = pwr.ComputeSignature(targetSignature.Container, targetPool, comm.NewStateConsumer())
		comm.EndProgress()
		if err != nil {
			return errors.Wrap(err, 1)
		}

		{
			prettySize := humanize.IBytes(uint64(targetSignature.Container.Size))
			perSecond := humanize.IBytes(uint64(float64(targetSignature.Container.Size) / time.Since(startTime).Seconds()))
			comm.Statf("%s (%s) @ %s/s\n", prettySize, targetSignature.Container.Stats(), perSecond)
		}
	}

	startTime = time.Now()

	var sourceContainer *tlc.Container
	sourceContainer, err = tlc.WalkAny(source, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	var sourcePool wsync.Pool
	sourcePool, err = pools.New(sourceContainer, source)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	patchWriter, err := os.Create(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer patchWriter.Close()

	signaturePath := patch + ".sig"
	signatureWriter, err := os.Create(signaturePath)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	dctx := &pwr.DiffContext{
		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer:    comm.NewStateConsumer(),
		Compression: &compression,
	}

	comm.Opf("Diffing %s", source)
	comm.StartProgress()
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	totalDuration := time.Since(startTime)
	{
		prettySize := humanize.IBytes(uint64(sourceContainer.Size))
		perSecond := humanize.IBytes(uint64(float64(sourceContainer.Size) / totalDuration.Seconds()))
		comm.Statf("%s (%s) @ %s/s\n", prettySize, sourceContainer.Stats(), perSecond)
	}

	if *diffArgs.verify {
		tmpDir, err := ioutil.TempDir("", "pwr")
		if err != nil {
			return errors.Wrap(err, 1)
		}
		defer os.RemoveAll(tmpDir)

		apply(patch, target, tmpDir, false, signaturePath, "")
	}

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))

		comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		comm.Statf("%s patch (%.2f%% of the full size) in %s", prettyPatchSize, relToNew, totalDuration)
	}

	return nil
}
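
A hypothetical round-trip sketch tying Examples #10 and #11 together: build a patch, apply it to a scratch directory, then verify the result against the signature that doDiff writes next to the patch. The compression literal is the same one used in Example #6.

func diffApplyVerify(oldDir string, newDir string, patchPath string) error {
	compression := pwr.CompressionSettings{
		Algorithm: pwr.CompressionAlgorithm_BROTLI,
		Quality:   1,
	}

	err := doDiff(oldDir, newDir, patchPath, compression)
	if err != nil {
		return err
	}

	stagingDir, err := ioutil.TempDir("", "diff-check")
	if err != nil {
		return err
	}
	defer os.RemoveAll(stagingDir)

	// reconstruct newDir from oldDir + patch into a scratch directory
	err = doApply(patchPath, oldDir, stagingDir, false, "", "")
	if err != nil {
		return err
	}

	// doDiff writes the signature to patchPath + ".sig" (see above)
	return doVerify(patchPath+".sig", stagingDir, "", "")
}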
Example #12
// Does not preserve ownership or permissions, except for the executable bit
func ditto(src string, dst string) {
	comm.Debugf("rsync -a %s %s", src, dst)

	totalSize := int64(0)
	doneSize := int64(0)
	oldProgress := 0.0

	inc := func(_ string, f os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		totalSize += f.Size()
		return nil
	}

	onFile := func(path string, f os.FileInfo, err error) error {
		if err != nil {
			comm.Logf("ignoring error %s", err.Error())
			return nil
		}

		rel, err := filepath.Rel(src, path)
		must(err)

		dstpath := filepath.Join(dst, rel)
		mode := f.Mode()

		switch {
		case mode.IsDir():
			dittoMkdir(dstpath)

		case mode.IsRegular():
			dittoReg(path, dstpath, os.FileMode(f.Mode()&archiver.LuckyMode|archiver.ModeMask))

		case (mode&os.ModeSymlink > 0):
			dittoSymlink(path, dstpath, f)
		}

		comm.Debug(rel)

		doneSize += f.Size()

		progress := float64(doneSize) / float64(totalSize)
		if progress-oldProgress > 0.01 {
			oldProgress = progress
			comm.Progress(progress)
		}

		return nil
	}

	rootinfo, err := os.Lstat(src)
	must(err)

	if rootinfo.IsDir() {
		totalSize = 0
		comm.Logf("Counting files in %s...", src)
		filepath.Walk(src, inc)

		comm.Logf("Mirroring...")
		filepath.Walk(src, onFile)
	} else {
		totalSize = rootinfo.Size()
		onFile(src, rootinfo, nil)
	}

	comm.EndProgress()
}