Example 1
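A test of CopyContainer: it lays out a small temporary source tree, walks it into a tlc.Container, then copies every entry from an fspool opened over the source directory into an fspool opened over the destination.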
func Test_CopyContainer(t *testing.T) {
	mainDir, err := ioutil.TempDir("", "copycontainer")
	assert.NoError(t, err)
	defer os.RemoveAll(mainDir)

	src := path.Join(mainDir, "src")
	dst := path.Join(mainDir, "dst")
	makeTestDir(t, src, testDirSettings{
		seed: 0x91738,
		entries: []testDirEntry{
			{path: "subdir/file-1", seed: 0x1},
			{path: "file-1", seed: 0x2},
			{path: "file-2", seed: 0x3},
		},
	})

	container, err := tlc.WalkAny(src, nil)
	assert.NoError(t, err)

	inPool := fspool.New(container, src)
	outPool := fspool.New(container, dst)

	err = CopyContainer(container, outPool, inPool, &state.Consumer{})
	assert.NoError(t, err)

	assert.NoError(t, inPool.Close())
	// close the write side too, so no file handles linger after the copy
	assert.NoError(t, outPool.Close())
}
Example 2
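A pool constructor: /dev/null and directories get a plain fspool, while any other path is opened through eos and served as a zip archive via zippool.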
func New(c *tlc.Container, basePath string) (wsync.Pool, error) {
	if basePath == "/dev/null" {
		return fspool.New(c, basePath), nil
	}

	fr, err := eos.Open(basePath)
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	targetInfo, err := fr.Stat()
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	if targetInfo.IsDir() {
		err := fr.Close()
		if err != nil {
			return nil, err
		}

		return fspool.New(c, basePath), nil
	}

	zr, err := zip.NewReader(fr, targetInfo.Size())
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	return zippool.New(c, zr), nil
}
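For orientation, a minimal sketch of how such a constructor pairs with tlc.WalkAny from the other examples. The helper name openAnyPool is hypothetical, imports are omitted as in the surrounding examples, and it assumes that this New is the pools.New called in Example 3 and that WalkAny accepts the same directory-or-archive paths:

func openAnyPool(basePath string) (*tlc.Container, wsync.Pool, error) {
	// walk basePath into a container listing its dirs, files and symlinks
	container, err := tlc.WalkAny(basePath, nil)
	if err != nil {
		return nil, nil, err
	}

	// New picks fspool for directories and zippool for zip archives
	pool, err := pools.New(container, basePath)
	if err != nil {
		return nil, nil, err
	}

	return container, pool, nil
}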
Example 3
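The main patch-application loop: fspool.New supplies the default output pool, an optional ValidatingPool checks written data against a signature (routing mismatches to a wounds consumer), and each file's sync operations are read from the patch wire and applied.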
func (actx *ApplyContext) patchAll(patchWire *wire.ReadContext, signature *SignatureInfo) (retErr error) {
	sourceContainer := actx.SourceContainer

	var validatingPool *ValidatingPool
	consumerErrs := make(chan error, 1)

	outputPool := actx.OutputPool
	if outputPool == nil {
		outputPool = fspool.New(sourceContainer, actx.OutputPath)
	}

	if signature != nil {
		validatingPool = &ValidatingPool{
			Pool:      outputPool,
			Container: sourceContainer,
			Signature: signature,
		}

		if actx.WoundsPath != "" {
			validatingPool.Wounds = make(chan *Wound)

			actx.WoundsConsumer = &WoundsWriter{
				WoundsPath: actx.WoundsPath,
			}
		}

		if actx.WoundsConsumer != nil {
			go func() {
				consumerErrs <- actx.WoundsConsumer.Do(signature.Container, validatingPool.Wounds)
			}()
		}

		outputPool = validatingPool
	}

	targetContainer := actx.TargetContainer
	targetPool := actx.TargetPool
	if targetPool == nil {
		if actx.TargetPath == "" {
			return fmt.Errorf("apply: need either TargetPool or TargetPath")
		}
		var cErr error
		targetPool, cErr = pools.New(targetContainer, actx.TargetPath)
		if cErr != nil {
			return cErr
		}
	}

	fileOffset := int64(0)
	sourceBytes := sourceContainer.Size
	onSourceWrite := func(count int64) {
		// We measure patching progress as the total number of bytes written
		// to the source container. No-ops (untouched files) count too, so the
		// progress bar may jump ahead a bit at times, but that's a pleasant surprise.
		// Measuring progress by bytes of patch read would just be a different
		// kind of inaccuracy (decompression buffers, etc.).
		actx.Consumer.Progress(float64(fileOffset+count) / float64(sourceBytes))
	}

	sctx := mksync()
	sh := &SyncHeader{}

	// transpositions, indexed by TargetPath
	transpositions := make(map[string][]*Transposition)
	actx.transpositions = transpositions

	defer func() {
		var closeErr error
		closeErr = targetPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		closeErr = outputPool.Close()
		if closeErr != nil {
			if retErr == nil {
				retErr = errors.Wrap(closeErr, 1)
			}
		}

		if validatingPool != nil {
			if validatingPool.Wounds != nil {
				close(validatingPool.Wounds)
			}
		}

		if actx.WoundsConsumer != nil {
			taskErr := <-consumerErrs
			if taskErr != nil {
				if retErr == nil {
					retErr = errors.Wrap(taskErr, 1)
				}
			}
		}
	}()

	for fileIndex, f := range sourceContainer.Files {
		actx.Consumer.ProgressLabel(f.Path)
		actx.Consumer.Debug(f.Path)
		fileOffset = f.Offset

		// Each series of patch operations is preceded by a SyncHeader carrying
		// the file index - a basic sanity check that the patch file we're reading
		// and the patching algorithm agree on which file is being processed.
		sh.Reset()
		err := patchWire.ReadMessage(sh)
		if err != nil {
			retErr = errors.Wrap(err, 1)
			return
		}

		if sh.FileIndex != int64(fileIndex) {
			fmt.Printf("expected fileIndex = %d, got fileIndex %d\n", fileIndex, sh.FileIndex)
			retErr = errors.Wrap(ErrMalformedPatch, 1)
			return
		}

		ops := make(chan wsync.Operation)
		errc := make(chan error, 1)

		go readOps(patchWire, ops, errc)

		bytesWritten, transposition, err := actx.lazilyPatchFile(sctx, targetContainer, targetPool, sourceContainer, outputPool, sh.FileIndex, onSourceWrite, ops, actx.InPlace)
		if err != nil {
			select {
			case nestedErr := <-errc:
				if nestedErr != nil {
					actx.Consumer.Debugf("Had an error while reading ops: %s", nestedErr.Error())
				}
			default:
				// no nested error
			}

			retErr = errors.Wrap(err, 1)
			return
		}

		if transposition != nil {
			transpositions[transposition.TargetPath] = append(transpositions[transposition.TargetPath], transposition)
		} else {
			actx.Stats.TouchedFiles++
			if bytesWritten != f.Size {
				retErr = fmt.Errorf("%s: expected to write %d bytes, wrote %d bytes", f.Path, f.Size, bytesWritten)
				return
			}
		}

		// errc doubles as the end-of-processing signal, so no separate
		// done channel is needed
		err = <-errc
		if err != nil {
			retErr = err
			return
		}
	}

	err := actx.applyTranspositions(transpositions)
	if err != nil {
		retErr = err
		return
	}

	return
}
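The deferred cleanup above works because patchAll declares a named return value, retErr, which the deferred closure can still overwrite after an early return. Reduced to its essentials (the function closeBoth and its io.Closer arguments are hypothetical, not part of the library), the pattern is:

func closeBoth(a, b io.Closer) (retErr error) {
	defer func() {
		// promote a close error to the return value only if nothing failed earlier
		for _, c := range []io.Closer{a, b} {
			if closeErr := c.Close(); closeErr != nil && retErr == nil {
				retErr = closeErr
			}
		}
	}()

	// ... work that may return early, setting retErr ...
	return nil
}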
Example 4
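An archive-backed healer: fspool.New opens the target directory as the pool the worker goroutines heal into, while wounds arriving on the channel are dispatched by kind (directories and symlinks are recreated inline, damaged files are queued for the workers).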
// Do starts receiving from the wounds channel and healing
func (ah *ArchiveHealer) Do(container *tlc.Container, wounds chan *Wound) error {
	ah.container = container

	files := make(map[int64]bool)
	fileIndices := make(chan int64)

	if ah.NumWorkers == 0 {
		ah.NumWorkers = runtime.NumCPU() + 1
	}

	defer ah.File.Close()

	stat, err := ah.File.Stat()
	if err != nil {
		return err
	}

	zipReader, err := zip.NewReader(ah.File, stat.Size())
	if err != nil {
		return errors.Wrap(err, 1)
	}

	targetPool := fspool.New(container, ah.Target)

	errs := make(chan error)
	done := make(chan bool, ah.NumWorkers)
	cancelled := make(chan struct{})

	onChunkHealed := func(healedChunk int64) {
		atomic.AddInt64(&ah.totalHealed, healedChunk)
		ah.updateProgress()
	}

	for i := 0; i < ah.NumWorkers; i++ {
		go ah.heal(container, zipReader, stat.Size(), targetPool, fileIndices, errs, done, cancelled, onChunkHealed)
	}

	processWound := func(wound *Wound) error {
		if !wound.Healthy() {
			ah.totalCorrupted += wound.Size()
			ah.hasWounds = true
		}

		switch wound.Kind {
		case WoundKind_DIR:
			dirEntry := container.Dirs[wound.Index]
			path := filepath.Join(ah.Target, filepath.FromSlash(dirEntry.Path))

			pErr := os.MkdirAll(path, 0755)
			if pErr != nil {
				return pErr
			}

		case WoundKind_SYMLINK:
			symlinkEntry := container.Symlinks[wound.Index]
			path := filepath.Join(ah.Target, filepath.FromSlash(symlinkEntry.Path))

			dir := filepath.Dir(path)
			pErr := os.MkdirAll(dir, 0755)
			if pErr != nil {
				return pErr
			}

			pErr = os.Symlink(symlinkEntry.Dest, path)
			if pErr != nil {
				return pErr
			}

		case WoundKind_FILE:
			if files[wound.Index] {
				// already queued
				return nil
			}

			file := container.Files[wound.Index]
			if ah.Consumer != nil {
				ah.Consumer.ProgressLabel(file.Path)
			}

			atomic.AddInt64(&ah.totalHealing, file.Size)
			ah.updateProgress()
			files[wound.Index] = true

			select {
			case pErr := <-errs:
				return pErr
			case fileIndices <- wound.Index:
				// queued for work!
			}

		case WoundKind_CLOSED_FILE:
			// if the whole file is already queued for healing, there's nothing to track
			if !files[wound.Index] {
				fileSize := container.Files[wound.Index].Size

				// the whole file was healthy
				if wound.End == fileSize {
					atomic.AddInt64(&ah.totalHealthy, fileSize)
				}
			}

		default:
			return fmt.Errorf("unknown wound kind: %d", wound.Kind)
		}

		return nil
	}

	for wound := range wounds {
		err = processWound(wound)
		if err != nil {
			close(fileIndices)
			close(cancelled)
			return errors.Wrap(err, 1)
		}
	}

	// queued everything
	close(fileIndices)

	// expecting up to NumWorkers done, some may still
	// send errors
	for i := 0; i < ah.NumWorkers; i++ {
		select {
		case err = <-errs:
			close(cancelled)
			return errors.Wrap(err, 1)
		case <-done:
			// good!
		}
	}

	return nil
}
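Do's concurrency layout, NumWorkers goroutines fed from fileIndices with errs and done signalling failure or completion, is a standard fan-out pattern. Below is a standalone sketch of the same coordination with hypothetical names and no wharf types; unlike Do, it buffers errs instead of using a cancelled channel, so failed workers never block:

func runWorkers(numWorkers int, jobs []int64, work func(int64) error) error {
	jobc := make(chan int64)
	errs := make(chan error, numWorkers)
	done := make(chan bool, numWorkers)

	for i := 0; i < numWorkers; i++ {
		go func() {
			for j := range jobc {
				if err := work(j); err != nil {
					errs <- err
					return
				}
			}
			done <- true
		}()
	}

	for _, j := range jobs {
		select {
		case err := <-errs:
			// a worker failed while we were still queueing
			close(jobc)
			return err
		case jobc <- j:
			// queued for work
		}
	}
	close(jobc) // queued everything

	// each worker either finishes (done) or has already reported an error (errs)
	for i := 0; i < numWorkers; i++ {
		select {
		case err := <-errs:
			return err
		case <-done:
		}
	}
	return nil
}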
Example 5
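An end-to-end patching scenario test: fspool.New backs both the diffing pools and the apply step, and the resulting patch is exercised out-of-place, in-place, with signatures and wounds, and against injected corruptions and intermediate files.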
func runPatchingScenario(t *testing.T, scenario patchScenario) {
	log := func(format string, args ...interface{}) {
		t.Logf("[%s] %s", scenario.name, fmt.Sprintf(format, args...))
	}
	log("Scenario start")

	mainDir, err := ioutil.TempDir("", "patch-cycle")
	assert.NoError(t, err)
	assert.NoError(t, os.MkdirAll(mainDir, 0755))
	defer os.RemoveAll(mainDir)

	v1 := filepath.Join(mainDir, "v1")
	makeTestDir(t, v1, scenario.v1)

	v2 := filepath.Join(mainDir, "v2")
	makeTestDir(t, v2, scenario.v2)

	compression := &CompressionSettings{}
	compression.Algorithm = CompressionAlgorithm_NONE

	sourceContainer, err := tlc.WalkAny(v2, nil)
	assert.NoError(t, err)

	consumer := &state.Consumer{}
	patchBuffer := new(bytes.Buffer)
	signatureBuffer := new(bytes.Buffer)

	func() {
		targetContainer, dErr := tlc.WalkAny(v1, nil)
		assert.NoError(t, dErr)

		targetPool := fspool.New(targetContainer, v1)
		targetSignature, dErr := ComputeSignature(targetContainer, targetPool, consumer)
		assert.NoError(t, dErr)

		pool := fspool.New(sourceContainer, v2)

		dctx := &DiffContext{
			Compression: compression,
			Consumer:    consumer,

			SourceContainer: sourceContainer,
			Pool:            pool,

			TargetContainer: targetContainer,
			TargetSignature: targetSignature,
		}

		assert.NoError(t, dctx.WritePatch(patchBuffer, signatureBuffer))
	}()

	v1Before := filepath.Join(mainDir, "v1Before")
	cpDir(t, v1, v1Before)

	v1After := filepath.Join(mainDir, "v1After")

	woundsPath := filepath.Join(mainDir, "wounds.pww")

	if scenario.extraTests {
		log("Making sure before-path folder doesn't validate")
		signature, sErr := ReadSignature(bytes.NewReader(signatureBuffer.Bytes()))
		assert.NoError(t, sErr)
		assert.Error(t, AssertValid(v1Before, signature))

		runExtraTest := func(setup SetupFunc) error {
			assert.NoError(t, os.RemoveAll(woundsPath))
			assert.NoError(t, os.RemoveAll(v1Before))
			cpDir(t, v1, v1Before)

			actx := &ApplyContext{
				TargetPath: v1Before,
				OutputPath: v1Before,

				InPlace:  true,
				Consumer: consumer,
			}
			if setup != nil {
				setup(actx)
			}

			patchReader := bytes.NewReader(patchBuffer.Bytes())

			aErr := actx.ApplyPatch(patchReader)
			if aErr != nil {
				return aErr
			}

			if actx.Signature == nil {
				vErr := AssertValid(v1Before, signature)
				if vErr != nil {
					return vErr
				}
			}

			return nil
		}

		func() {
			log("In-place with failing vet")
			var NotVettingError = errors.New("not vetting this")
			pErr := runExtraTest(func(actx *ApplyContext) {
				actx.VetApply = func(actx *ApplyContext) error {
					return NotVettingError
				}
			})
			assert.Error(t, pErr)
			assert.True(t, errors.Is(pErr, NotVettingError))
		}()

		func() {
			log("In-place with signature (failfast, passing)")
			assert.NoError(t, runExtraTest(func(actx *ApplyContext) {
				actx.Signature = signature
			}))
		}()

		func() {
			log("In-place with signature (failfast, failing)")
			assert.Error(t, runExtraTest(func(actx *ApplyContext) {
				actx.Signature = signature
				makeTestDir(t, v1Before, *scenario.corruptions)
			}))
		}()

		func() {
			log("In-place with signature (wounds, passing)")
			assert.NoError(t, runExtraTest(func(actx *ApplyContext) {
				actx.Signature = signature
				actx.WoundsPath = woundsPath
			}))

			_, sErr := os.Lstat(woundsPath)
			assert.Error(t, sErr)
			assert.True(t, os.IsNotExist(sErr))
		}()

		func() {
			log("In-place with signature (wounds, failing)")
			assert.NoError(t, runExtraTest(func(actx *ApplyContext) {
				actx.Signature = signature
				actx.WoundsPath = woundsPath
				makeTestDir(t, v1Before, *scenario.corruptions)
			}))

			_, sErr := os.Lstat(woundsPath)
			assert.NoError(t, sErr)
		}()
	}

	log("Applying to other directory, with separate check")
	assert.NoError(t, os.RemoveAll(v1Before))
	cpDir(t, v1, v1Before)

	func() {
		actx := &ApplyContext{
			TargetPath: v1Before,
			OutputPath: v1After,

			Consumer: consumer,
		}

		patchReader := bytes.NewReader(patchBuffer.Bytes())

		aErr := actx.ApplyPatch(patchReader)
		assert.NoError(t, aErr)

		assert.Equal(t, 0, actx.Stats.DeletedFiles, "deleted files (other dir)")
		assert.Equal(t, 0, actx.Stats.DeletedDirs, "deleted dirs (other dir)")
		assert.Equal(t, 0, actx.Stats.DeletedSymlinks, "deleted symlinks (other dir)")
		assert.Equal(t, 0, actx.Stats.MovedFiles, "moved files (other dir)")
		assert.Equal(t, len(sourceContainer.Files), actx.Stats.TouchedFiles, "touched files (other dir)")
		assert.Equal(t, 0, actx.Stats.NoopFiles, "noop files (other dir)")

		signature, sErr := ReadSignature(bytes.NewReader(signatureBuffer.Bytes()))
		assert.NoError(t, sErr)

		assert.NoError(t, AssertValid(v1After, signature))
	}()

	log("Applying in-place")

	testAll := func(setup SetupFunc) {
		assert.NoError(t, os.RemoveAll(v1After))
		assert.NoError(t, os.RemoveAll(v1Before))
		cpDir(t, v1, v1Before)

		func() {
			actx := &ApplyContext{
				TargetPath: v1Before,
				OutputPath: v1Before,

				InPlace: true,

				Consumer: consumer,
			}
			if setup != nil {
				setup(actx)
			}

			patchReader := bytes.NewReader(patchBuffer.Bytes())

			aErr := actx.ApplyPatch(patchReader)
			assert.NoError(t, aErr)

			assert.Equal(t, scenario.deletedFiles, actx.Stats.DeletedFiles, "deleted files (in-place)")
			assert.Equal(t, scenario.deletedSymlinks, actx.Stats.DeletedSymlinks, "deleted symlinks (in-place)")
			assert.Equal(t, scenario.deletedDirs+scenario.leftDirs, actx.Stats.DeletedDirs, "deleted dirs (in-place)")
			assert.Equal(t, scenario.touchedFiles, actx.Stats.TouchedFiles, "touched files (in-place)")
			assert.Equal(t, scenario.movedFiles, actx.Stats.MovedFiles, "moved files (in-place)")
			assert.Equal(t, len(sourceContainer.Files)-scenario.touchedFiles-scenario.movedFiles, actx.Stats.NoopFiles, "noop files (in-place)")

			signature, sErr := ReadSignature(bytes.NewReader(signatureBuffer.Bytes()))
			assert.NoError(t, sErr)

			assert.NoError(t, AssertValid(v1Before, signature))
		}()

		if scenario.intermediate != nil {
			log("Applying in-place with %d intermediate files", len(scenario.intermediate.entries))

			assert.NoError(t, os.RemoveAll(v1After))
			assert.NoError(t, os.RemoveAll(v1Before))
			cpDir(t, v1, v1Before)

			makeTestDir(t, v1Before, *scenario.intermediate)

			func() {
				actx := &ApplyContext{
					TargetPath: v1Before,
					OutputPath: v1Before,

					InPlace: true,

					Consumer: consumer,
				}
				if setup != nil {
					setup(actx)
				}

				patchReader := bytes.NewReader(patchBuffer.Bytes())

				aErr := actx.ApplyPatch(patchReader)
				assert.NoError(t, aErr)

				assert.Equal(t, scenario.deletedFiles, actx.Stats.DeletedFiles, "deleted files (in-place w/intermediate)")
				assert.Equal(t, scenario.deletedDirs, actx.Stats.DeletedDirs, "deleted dirs (in-place w/intermediate)")
				assert.Equal(t, scenario.deletedSymlinks, actx.Stats.DeletedSymlinks, "deleted symlinks (in-place w/intermediate)")
				assert.Equal(t, scenario.touchedFiles, actx.Stats.TouchedFiles, "touched files (in-place w/intermediate)")
				assert.Equal(t, scenario.noopFiles, actx.Stats.NoopFiles, "noop files (in-place w/intermediate)")
				assert.Equal(t, scenario.leftDirs, actx.Stats.LeftDirs, "left dirs (in-place w/intermediate)")

				signature, sErr := ReadSignature(bytes.NewReader(signatureBuffer.Bytes()))
				assert.NoError(t, sErr)

				assert.NoError(t, AssertValid(v1Before, signature))
			}()
		}
	}

	testAll(nil)

	if scenario.testBrokenRename {
		testAll(func(actx *ApplyContext) {
			actx.debugBrokenRename = true
		})
	}
}