Example #1
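doSign walks output as a container, optionally fixes its file permissions, then streams a compressed signature to disk: magic number, header, the container listing, and one BlockHash message per block.
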
func doSign(output string, signature string, compression pwr.CompressionSettings, fixPerms bool) error {
	comm.Opf("Creating signature for %s", output)
	startTime := time.Now()

	container, err := tlc.WalkAny(output, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	pool, err := pools.New(container, output)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	if fixPerms {
		container.FixPermissions(pool)
	}

	signatureWriter, err := os.Create(signature)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	rawSigWire := wire.NewWriteContext(signatureWriter)
	err = rawSigWire.WriteMagic(pwr.SignatureMagic)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = rawSigWire.WriteMessage(&pwr.SignatureHeader{
		Compression: &compression,
	})
	if err != nil {
		return errors.Wrap(err, 1)
	}

	sigWire, err := pwr.CompressWire(rawSigWire, &compression)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	err = sigWire.WriteMessage(container)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.StartProgress()
	err = pwr.ComputeSignatureToWriter(container, pool, comm.NewStateConsumer(), func(hash wsync.BlockHash) error {
		return sigWire.WriteMessage(&pwr.BlockHash{
			WeakHash:   hash.WeakHash,
			StrongHash: hash.StrongHash,
		})
	})
	comm.EndProgress()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = sigWire.Close()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	prettySize := humanize.IBytes(uint64(container.Size))
	perSecond := humanize.IBytes(uint64(float64(container.Size) / time.Since(startTime).Seconds()))
	comm.Statf("%s (%s) @ %s/s\n", prettySize, container.Stats(), perSecond)

	return nil
}
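
A hypothetical caller, sketched under the assumption that a pwr.CompressionSettings value is already in hand (e.g. parsed from CLI flags); the paths and the log-based error handling are illustrative, not taken from the source:

func signBuild(settings pwr.CompressionSettings) {
	// "./build" and "./build.sig" are illustrative paths.
	err := doSign("./build", "./build.sig", settings, false)
	if err != nil {
		log.Fatalf("sign failed: %s", err.Error())
	}
}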
Example #2
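doWalk wraps container walking for concurrent use: it sends a walkResult (the container plus a matching pool, with permissions optionally fixed) on out, or reports the failure on errs.
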
func doWalk(path string, out chan walkResult, errs chan error, fixPerms bool) {
	container, err := tlc.WalkAny(path, filterPaths)
	if err != nil {
		errs <- errors.Wrap(err, 1)
		return
	}

	pool, err := pools.New(container, path)
	if err != nil {
		errs <- errors.Wrap(err, 1)
		return
	}

	result := walkResult{
		container: container,
		pool:      pool,
	}

	if fixPerms {
		result.container.FixPermissions(result.pool)
	}
	out <- result
}
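
Because doWalk reports through channels, a caller can walk two trees concurrently and fail fast on the first error. A minimal sketch of such a caller (the function name and buffer sizes are choices made here, not taken from the source):

func walkBoth(targetPath string, sourcePath string, fixPerms bool) ([]walkResult, error) {
	out := make(chan walkResult, 2)
	errs := make(chan error, 2)

	go doWalk(targetPath, out, errs, fixPerms)
	go doWalk(sourcePath, out, errs, fixPerms)

	var results []walkResult
	for len(results) < 2 {
		select {
		case res := <-out:
			results = append(results, res)
		case err := <-errs:
			// fail fast: the other goroutine's send won't block,
			// thanks to the buffered channels
			return nil, err
		}
	}
	return results, nil
}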
Example #3
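doDiff builds a signature for target, accepting either a directory/archive to hash or an existing signature file to read, then diffs source against it, writing a patch plus a companion .sig; when verification is requested, the patch is also applied to a temporary directory.
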
func doDiff(target string, source string, patch string, compression pwr.CompressionSettings) error {
	var err error

	startTime := time.Now()

	targetSignature := &pwr.SignatureInfo{}

	targetSignature.Container, err = tlc.WalkAny(target, filterPaths)
	if err != nil {
		// Signature file perhaps?
		var signatureReader io.ReadCloser

		signatureReader, err = eos.Open(target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			if errors.Is(err, wire.ErrFormat) {
				return fmt.Errorf("unrecognized target %s (not a container, not a signature file)", target)
			}
			return errors.Wrap(err, 1)
		}

		comm.Opf("Read signature from %s", target)

		err = signatureReader.Close()
		if err != nil {
			return errors.Wrap(err, 1)
		}
	} else {
		// Container (dir, archive, etc.)
		comm.Opf("Hashing %s", target)

		comm.StartProgress()
		var targetPool wsync.Pool
		targetPool, err = pools.New(targetSignature.Container, target)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature.Hashes, err = pwr.ComputeSignature(targetSignature.Container, targetPool, comm.NewStateConsumer())
		comm.EndProgress()
		if err != nil {
			return errors.Wrap(err, 1)
		}

		{
			prettySize := humanize.IBytes(uint64(targetSignature.Container.Size))
			perSecond := humanize.IBytes(uint64(float64(targetSignature.Container.Size) / time.Since(startTime).Seconds()))
			comm.Statf("%s (%s) @ %s/s\n", prettySize, targetSignature.Container.Stats(), perSecond)
		}
	}

	startTime = time.Now()

	var sourceContainer *tlc.Container
	sourceContainer, err = tlc.WalkAny(source, filterPaths)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	var sourcePool wsync.Pool
	sourcePool, err = pools.New(sourceContainer, source)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	patchWriter, err := os.Create(patch)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer patchWriter.Close()

	signaturePath := patch + ".sig"
	signatureWriter, err := os.Create(signaturePath)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer signatureWriter.Close()

	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	dctx := &pwr.DiffContext{
		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer:    comm.NewStateConsumer(),
		Compression: &compression,
	}

	comm.Opf("Diffing %s", source)
	comm.StartProgress()
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	comm.EndProgress()

	totalDuration := time.Since(startTime)
	{
		prettySize := humanize.IBytes(uint64(sourceContainer.Size))
		perSecond := humanize.IBytes(uint64(float64(sourceContainer.Size) / totalDuration.Seconds()))
		comm.Statf("%s (%s) @ %s/s\n", prettySize, sourceContainer.Stats(), perSecond)
	}

	if *diffArgs.verify {
		tmpDir, err := ioutil.TempDir("", "pwr")
		if err != nil {
			return errors.Wrap(err, 1)
		}
		defer os.RemoveAll(tmpDir)

		apply(patch, target, tmpDir, false, signaturePath, "")
	}

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))

		comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		comm.Statf("%s patch (%.2f%% of the full size) in %s", prettyPatchSize, relToNew, totalDuration)
	}

	return nil
}
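
To make the closing stats concrete: if dctx.ReusedBytes is 90 MiB, dctx.FreshBytes is 10 MiB, and the patch weighs 12 MiB against a 100 MiB source, then percReused = 100 × 90 / (90 + 10) = 90% and relToNew = 100 × 12 / 100 = 12% (the figures are illustrative).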
Example #4
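patchAll is the main patching loop: for each file in the source container it reads a SyncHeader plus a stream of sync operations from the patch wire, patches lazily (recording transpositions for later), and optionally validates output against a signature, forwarding any wounds to a consumer.
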
func (actx *ApplyContext) patchAll(patchWire *wire.ReadContext, signature *SignatureInfo) (retErr error) {
	sourceContainer := actx.SourceContainer

	var validatingPool *ValidatingPool
	consumerErrs := make(chan error, 1)

	outputPool := actx.OutputPool
	if outputPool == nil {
		outputPool = fspool.New(sourceContainer, actx.OutputPath)
	}

	if signature != nil {
		validatingPool = &ValidatingPool{
			Pool:      outputPool,
			Container: sourceContainer,
			Signature: signature,
		}

		if actx.WoundsPath != "" {
			validatingPool.Wounds = make(chan *Wound)

			actx.WoundsConsumer = &WoundsWriter{
				WoundsPath: actx.WoundsPath,
			}
		}

		if actx.WoundsConsumer != nil {
			go func() {
				consumerErrs <- actx.WoundsConsumer.Do(signature.Container, validatingPool.Wounds)
			}()
		}

		outputPool = validatingPool
	}

	targetContainer := actx.TargetContainer
	targetPool := actx.TargetPool
	if targetPool == nil {
		if actx.TargetPath == "" {
			return fmt.Errorf("apply: need either TargetPool or TargetPath")
		}
		var cErr error
		targetPool, cErr = pools.New(targetContainer, actx.TargetPath)
		if cErr != nil {
			return cErr
		}
	}

	fileOffset := int64(0)
	sourceBytes := sourceContainer.Size
	onSourceWrite := func(count int64) {
		// we measure patching progress as the number of total bytes written
		// to the source container. no-ops (untouched files) count too, so the
		// progress bar may jump ahead a bit at times, but that's a good surprise.
		// measuring progress by bytes of the patch read would just be a different
		// kind of inaccuracy (due to decompression buffers, etc.)
		actx.Consumer.Progress(float64(fileOffset+count) / float64(sourceBytes))
	}

	sctx := mksync()
	sh := &SyncHeader{}

	// transpositions, indexed by TargetPath
	transpositions := make(map[string][]*Transposition)
	actx.transpositions = transpositions

	defer func() {
		var closeErr error
		closeErr = targetPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		closeErr = outputPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		if validatingPool != nil {
			if validatingPool.Wounds != nil {
				close(validatingPool.Wounds)
			}
		}

		if actx.WoundsConsumer != nil {
			taskErr := <-consumerErrs
			if taskErr != nil {
				if retErr == nil {
					retErr = errors.Wrap(taskErr, 1)
				}
			}
		}
	}()

	for fileIndex, f := range sourceContainer.Files {
		actx.Consumer.ProgressLabel(f.Path)
		actx.Consumer.Debug(f.Path)
		fileOffset = f.Offset

		// each series of patch operations is preceded by a SyncHeader giving
		// us the file index - it's a super basic measure to make sure the
		// patch file we're reading and the patching algorithm somewhat agree
		// on what's happening.
		sh.Reset()
		err := patchWire.ReadMessage(sh)
		if err != nil {
			retErr = errors.Wrap(err, 1)
			return
		}

		if sh.FileIndex != int64(fileIndex) {
			fmt.Printf("expected fileIndex = %d, got fileIndex %d\n", fileIndex, sh.FileIndex)
			retErr = errors.Wrap(ErrMalformedPatch, 1)
			return
		}

		ops := make(chan wsync.Operation)
		errc := make(chan error, 1)

		go readOps(patchWire, ops, errc)

		bytesWritten, transposition, err := actx.lazilyPatchFile(sctx, targetContainer, targetPool, sourceContainer, outputPool, sh.FileIndex, onSourceWrite, ops, actx.InPlace)
		if err != nil {
			select {
			case nestedErr := <-errc:
				if nestedErr != nil {
					actx.Consumer.Debugf("Had an error while reading ops: %s", nestedErr.Error())
				}
			default:
				// no nested error
			}

			retErr = errors.Wrap(err, 1)
			return
		}

		if transposition != nil {
			transpositions[transposition.TargetPath] = append(transpositions[transposition.TargetPath], transposition)
		} else {
			actx.Stats.TouchedFiles++
			if bytesWritten != f.Size {
				retErr = fmt.Errorf("%s: expected to write %d bytes, wrote %d bytes", f.Path, f.Size, bytesWritten)
				return
			}
		}

		// using errc to signal the end of processing, rather than having a separate
		// done channel. not sure if there's any upside to either
		err = <-errc
		if err != nil {
			retErr = err
			return
		}
	}

	err := actx.applyTranspositions(transpositions)
	if err != nil {
		retErr = err
		return
	}

	return
}
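
The wounds-consumer wiring above follows a common Go shape: start the consumer goroutine with a 1-buffered error channel, close the data channel in a deferred block, then drain the error channel so the goroutine's outcome is never lost. A self-contained sketch of just that shape (the names here are illustrative, not from the source above):

package main

import "fmt"

func main() {
	data := make(chan int)
	consumerErrs := make(chan error, 1) // buffered: the goroutine's send never blocks

	go func() {
		sum := 0
		for v := range data {
			sum += v
		}
		fmt.Println("consumed:", sum)
		consumerErrs <- nil // report the consumer's final status exactly once
	}()

	for i := 1; i <= 3; i++ {
		data <- i
	}

	close(data) // signal that no more work is coming
	if err := <-consumerErrs; err != nil {
		fmt.Println("consumer failed:", err)
	}
}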
Example #5
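validate is a worker that pulls file indices off a channel and checks each file against the signature by copying it through a ValidatingPool backed by a null pool; missing or truncated files are reported as wounds, which are aggregated before being relayed.
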
func (vctx *ValidatorContext) validate(target string, signature *SignatureInfo, fileIndices chan int64,
	errs chan error, onProgress onProgressFunc, cancelled chan struct{}) {

	var retErr error

	targetPool, err := pools.New(signature.Container, target)
	if err != nil {
		errs <- err
		return
	}

	defer func() {
		closeErr := targetPool.Close()
		if closeErr != nil && retErr == nil {
			retErr = errors.Wrap(closeErr, 1)
		}

		// always report back, even if closing the pool failed,
		// so the receiver on errs doesn't block forever
		errs <- retErr
	}()

	aggregateOut := make(chan *Wound)
	relayDone := make(chan bool)
	go func() {
		for w := range aggregateOut {
			vctx.Wounds <- w
		}
		relayDone <- true
	}()

	wounds := AggregateWounds(aggregateOut, MaxWoundSize)
	defer func() {
		// signal no more wounds are going to be sent
		close(wounds)
		// wait for all of them to be relayed
		<-relayDone
	}()

	validatingPool := &ValidatingPool{
		Pool:      nullpool.New(signature.Container),
		Container: signature.Container,
		Signature: signature,

		Wounds: wounds,
	}

	doOne := func(fileIndex int64) error {
		file := signature.Container.Files[fileIndex]

		var reader io.Reader
		reader, err = targetPool.GetReader(fileIndex)
		if err != nil {
			if os.IsNotExist(err) {
				// whole file is missing
				wound := &Wound{
					Kind:  WoundKind_FILE,
					Index: fileIndex,
					Start: 0,
					End:   file.Size,
				}
				onProgress(file.Size)

				select {
				case wounds <- wound:
				case <-cancelled:
				}
				return nil
			}
			return err
		}

		var writer io.WriteCloser
		writer, err = validatingPool.GetWriter(fileIndex)
		if err != nil {
			return err
		}

		defer writer.Close()

		lastCount := int64(0)
		countingWriter := counter.NewWriterCallback(func(count int64) {
			delta := count - lastCount
			onProgress(delta)
			lastCount = count
		}, writer)

		var writtenBytes int64
		writtenBytes, err = io.Copy(countingWriter, reader)
		if err != nil {
			return err
		}

		if writtenBytes != file.Size {
			onProgress(file.Size - writtenBytes)
			wound := &Wound{
				Kind:  WoundKind_FILE,
				Index: fileIndex,
				Start: writtenBytes,
				End:   file.Size,
			}

			select {
			case wounds <- wound:
			case <-cancelled:
			}
		}

		return nil
	}

	for {
		select {
		case fileIndex, ok := <-fileIndices:
			if !ok {
				// no more work
				return
			}

			err := doOne(fileIndex)
			if err != nil {
				if retErr == nil {
					retErr = err
				}
				return
			}
		case <-cancelled:
			// cancelled
			return
		}
	}
}
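
The counter.NewWriterCallback usage above reports cumulative byte counts, which validate() turns into per-write deltas for onProgress. A self-contained sketch of that delta pattern using only the standard library (the callbackWriter type is hand-rolled here, not the counter package's API):

package main

import (
	"fmt"
	"io"
	"strings"
)

// callbackWriter calls fn with the cumulative byte count after each Write.
type callbackWriter struct {
	w     io.Writer
	count int64
	fn    func(count int64)
}

func (cw *callbackWriter) Write(p []byte) (int, error) {
	n, err := cw.w.Write(p)
	cw.count += int64(n)
	cw.fn(cw.count)
	return n, err
}

func main() {
	lastCount := int64(0)
	cw := &callbackWriter{
		w: io.Discard,
		fn: func(count int64) {
			delta := count - lastCount // emit increments, like validate() does
			lastCount = count
			fmt.Println("progress delta:", delta)
		},
	}

	if _, err := io.Copy(cw, strings.NewReader("hello wharf")); err != nil {
		fmt.Println("copy failed:", err)
	}
}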