Code example #1
File: apply.go Project: itchio/butler
func readOps(rc *wire.ReadContext, ops chan wsync.Operation, errc chan error) {
	defer close(ops)
	rop := &SyncOp{}

	readingOps := true
	for readingOps {
		rop.Reset()
		err := rc.ReadMessage(rop)
		if err != nil {
			fmt.Fprintf(os.Stderr, "readOps error: %s\n", err.Error())
			errc <- errors.Wrap(err, 1)
			return
		}

		switch rop.Type {
		case SyncOp_BLOCK_RANGE:
			ops <- wsync.Operation{
				Type:       wsync.OpBlockRange,
				FileIndex:  rop.FileIndex,
				BlockIndex: rop.BlockIndex,
				BlockSpan:  rop.BlockSpan,
			}

		case SyncOp_DATA:
			ops <- wsync.Operation{
				Type: wsync.OpData,
				Data: rop.Data,
			}

		case SyncOp_HEY_YOU_DID_IT:
			// a series of patching operations always ends with a SyncOp_HEY_YOU_DID_IT.
			// this helps detect truncated patch files, and, again, basic boundary
			// safety measures are cheap and reassuring.
			readingOps = false

		default:
			errc <- errors.Wrap(ErrMalformedPatch, 1)
			return
		}
	}

	errc <- nil
}
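readOps is meant to run as a goroutine feeding a consumer over the ops channel. A minimal sketch of the calling pattern (it mirrors how code example #4 below drives it; patchWire is assumed to be an already-open *wire.ReadContext):

ops := make(chan wsync.Operation)
errc := make(chan error, 1)

go readOps(patchWire, ops, errc)

for op := range ops {
	// consume each operation; readOps closes ops when it is done or fails
	_ = op
}

// a nil value here means the series ended with the SyncOp_HEY_YOU_DID_IT marker
if err := <-errc; err != nil {
	return err
}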
Code example #2
File: compression.go Project: itchio/wharf
// DecompressWire wraps a wire.ReadContext with a decompressor, according to the given settings,
// so that any message read through the returned ReadContext is first decompressed.
func DecompressWire(ctx *wire.ReadContext, compression *CompressionSettings) (*wire.ReadContext, error) {
	if compression == nil {
		return nil, errors.Wrap(fmt.Errorf("no compression specified"), 1)
	}

	if compression.Algorithm == CompressionAlgorithm_NONE {
		return ctx, nil
	}

	decompressor := decompressors[compression.Algorithm]
	if decompressor == nil {
		return nil, errors.Wrap(fmt.Errorf("no compressor registered for %s", compression.Algorithm.String()), 1)
	}

	compressedReader, err := decompressor.Apply(ctx.Reader())
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	return wire.NewReadContext(compressedReader), nil
}
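A minimal usage sketch, assuming the compression settings arrive in a header message read from the same wire (patchReader and the PatchHeader type with its Compression field are assumptions, not shown in this listing):

rawWire := wire.NewReadContext(patchReader)

header := &PatchHeader{} // assumed message carrying *CompressionSettings
if err := rawWire.ReadMessage(header); err != nil {
	return err
}

patchWire, err := DecompressWire(rawWire, header.Compression)
if err != nil {
	return err
}
// from here on, every ReadMessage call on patchWire sees decompressed data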
Code example #3
File: genie.go Project: itchio/wharf
func (g *Genie) analyzeFile(patchWire *wire.ReadContext, fileIndex int64, fileSize int64, onComp CompositionListener) error {
	rop := &pwr.SyncOp{}

	smallBlockSize := int64(pwr.BlockSize)
	bigBlockSize := g.BlockSize

	comp := &Composition{
		FileIndex: int64(fileIndex),
	}

	// loop until we read a SyncOp_HEY_YOU_DID_IT, which returns from the function
	for {
		rop.Reset()
		pErr := patchWire.ReadMessage(rop)
		if pErr != nil {
			return errors.Wrap(pErr, 1)
		}

		switch rop.Type {
		case pwr.SyncOp_BLOCK_RANGE:
			// SyncOps operate in terms of small blocks; we want byte offsets
			bo := &BlockOrigin{
				FileIndex: rop.FileIndex,
				Offset:    rop.BlockIndex * smallBlockSize,
				Size:      rop.BlockSpan * smallBlockSize,
			}

			// As long as the block origin would span beyond the end of the
			// big block we're currently analyzing, split it into {A, B},
			// where A fits into the current big block, and B is the rest
			for comp.Size+bo.Size > bigBlockSize {
				truncatedSize := bigBlockSize - comp.Size

				// truncatedSize may be 0 if `comp.Size == bigBlockSize`, i.e. comp already
				// explains all the contents of the current big block - in this case,
				// we keep this BlockOrigin intact for the next iteration of the loop
				// (during which comp.Size will be 0)
				if truncatedSize > 0 {
					// this is A
					comp.Append(&BlockOrigin{
						FileIndex: rop.FileIndex,
						Offset:    bo.Offset,
						Size:      truncatedSize,
					})

					// and bo becomes B
					bo.Offset += truncatedSize
					bo.Size -= truncatedSize
				}

				onComp(comp)

				// after sending over the composition, we allocate a new one - same file, next block
				// (sent comps should not be modified afterwards)
				comp = &Composition{
					FileIndex:  int64(fileIndex),
					BlockIndex: comp.BlockIndex + 1,
				}
			}

			// after all the splitting, there might still be some data left over
			// (that's smaller than bigBlockSize)
			if bo.Size > 0 {
				comp.Append(bo)
			}
		case pwr.SyncOp_DATA:
			// Data SyncOps are not aligned to blocks in either target or source. Since genie
			// works in byte offsets, this suits us just fine.
			fo := &FreshOrigin{
				Size: int64(len(rop.Data)),
			}

			for comp.Size+fo.Size > bigBlockSize {
				truncatedSize := bigBlockSize - comp.Size

				// only append if some of the data fits in the current big block; otherwise the
				// comp is sent as-is and fo is handled in the next iteration, where comp.Size will be 0
				if truncatedSize > 0 {
					comp.Append(&FreshOrigin{
						Size: truncatedSize,
					})

					fo.Size -= truncatedSize
				}

				onComp(comp)

				// allocate a new comp after sending, so we don't write to the previous one
				comp = &Composition{
					FileIndex:  int64(fileIndex),
					BlockIndex: comp.BlockIndex + 1,
				}
			}

			if fo.Size > 0 {
				comp.Append(fo)
			}
		case pwr.SyncOp_HEY_YOU_DID_IT:
			if comp.Size > 0 && fileSize > 0 {
				onComp(comp)
			}
			return nil
		}
	}
}
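The CompositionListener type itself is not shown in this listing; judging from the onComp(comp) calls above, it receives each finished *Composition. A minimal sketch of a listener that only logs what it is given (the signature is inferred, and the fmt import is assumed):

onComp := func(comp *Composition) {
	// FileIndex, BlockIndex and Size are the fields analyzeFile fills in above
	fmt.Printf("file %d, big block %d: %d bytes explained\n",
		comp.FileIndex, comp.BlockIndex, comp.Size)
}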
Code example #4
File: apply.go Project: itchio/butler
func (actx *ApplyContext) patchAll(patchWire *wire.ReadContext, signature *SignatureInfo) (retErr error) {
	sourceContainer := actx.SourceContainer

	var validatingPool *ValidatingPool
	consumerErrs := make(chan error, 1)

	outputPool := actx.OutputPool
	if outputPool == nil {
		outputPool = fspool.New(sourceContainer, actx.OutputPath)
	}

	if signature != nil {
		validatingPool = &ValidatingPool{
			Pool:      outputPool,
			Container: sourceContainer,
			Signature: signature,
		}

		if actx.WoundsPath != "" {
			validatingPool.Wounds = make(chan *Wound)

			actx.WoundsConsumer = &WoundsWriter{
				WoundsPath: actx.WoundsPath,
			}
		}

		if actx.WoundsConsumer != nil {
			go func() {
				consumerErrs <- actx.WoundsConsumer.Do(signature.Container, validatingPool.Wounds)
			}()
		}

		outputPool = validatingPool
	}

	targetContainer := actx.TargetContainer
	targetPool := actx.TargetPool
	if targetPool == nil {
		if actx.TargetPath == "" {
			return fmt.Errorf("apply: need either TargetPool or TargetPath")
		}
		var cErr error
		targetPool, cErr = pools.New(targetContainer, actx.TargetPath)
		if cErr != nil {
			return cErr
		}
	}

	fileOffset := int64(0)
	sourceBytes := sourceContainer.Size
	onSourceWrite := func(count int64) {
		// we measure patching progress as the total number of bytes written
		// to the source container. no-ops (untouched files) count too, so the
		// progress bar may jump ahead a bit at times, but that's a good surprise.
		// measuring progress by bytes of the patch read would just be a different
		// kind of inaccuracy (due to decompression buffers, etc.)
		actx.Consumer.Progress(float64(fileOffset+count) / float64(sourceBytes))
	}

	sctx := mksync()
	sh := &SyncHeader{}

	// transpositions, indexed by TargetPath
	transpositions := make(map[string][]*Transposition)
	actx.transpositions = transpositions

	defer func() {
		var closeErr error
		closeErr = targetPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		closeErr = outputPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		if validatingPool != nil {
			if validatingPool.Wounds != nil {
				close(validatingPool.Wounds)
			}
		}

		if actx.WoundsConsumer != nil {
			taskErr := <-consumerErrs
			if taskErr != nil {
				if retErr == nil {
					retErr = errors.Wrap(taskErr, 1)
				}
			}
		}
	}()

	for fileIndex, f := range sourceContainer.Files {
		actx.Consumer.ProgressLabel(f.Path)
		actx.Consumer.Debug(f.Path)
		fileOffset = f.Offset

		// each series of patch operations is preceded by a SyncHeader giving
		// us the file index - it's a super basic measure to make sure the
		// patch file we're reading and the patching algorithm somewhat agree
		// on what's happening.
		sh.Reset()
		err := patchWire.ReadMessage(sh)
		if err != nil {
			retErr = errors.Wrap(err, 1)
			return
		}

		if sh.FileIndex != int64(fileIndex) {
			fmt.Printf("expected fileIndex = %d, got fileIndex %d\n", fileIndex, sh.FileIndex)
			retErr = errors.Wrap(ErrMalformedPatch, 1)
			return
		}

		ops := make(chan wsync.Operation)
		errc := make(chan error, 1)

		go readOps(patchWire, ops, errc)

		bytesWritten, transposition, err := actx.lazilyPatchFile(sctx, targetContainer, targetPool, sourceContainer, outputPool, sh.FileIndex, onSourceWrite, ops, actx.InPlace)
		if err != nil {
			select {
			case nestedErr := <-errc:
				if nestedErr != nil {
					actx.Consumer.Debugf("Had an error while reading ops: %s", nestedErr.Error())
				}
			default:
				// no nested error
			}

			retErr = errors.Wrap(err, 1)
			return
		}

		if transposition != nil {
			transpositions[transposition.TargetPath] = append(transpositions[transposition.TargetPath], transposition)
		} else {
			actx.Stats.TouchedFiles++
			if bytesWritten != f.Size {
				retErr = fmt.Errorf("%s: expected to write %d bytes, wrote %d bytes", f.Path, f.Size, bytesWritten)
				return
			}
		}

		// using errc to signal the end of processing, rather than having a separate
		// done channel. not sure if there's any upside to either
		err = <-errc
		if err != nil {
			retErr = err
			return
		}
	}

	err := actx.applyTranspositions(transpositions)
	if err != nil {
		retErr = err
		return
	}

	return
}
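patchAll is unexported, and its exported entry point is not part of this listing; as a rough sketch, these are the ApplyContext fields the function above reads, with placeholder values (the paths are purely illustrative):

actx := &ApplyContext{
	TargetPath: "/path/to/old-build", // read through pools.New when TargetPool is nil
	OutputPath: "/path/to/new-build", // written through fspool.New when OutputPool is nil
	InPlace:    false,                // forwarded to lazilyPatchFile; enables transpositions
	WoundsPath: "",                   // non-empty: validation failures are recorded as wounds
	// SourceContainer, TargetContainer and Consumer must be supplied by the caller
	// before patching starts; the signature seen above is passed in separately.
}
_ = actx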