Example #1
// FixPermissions reads the first minScannedFileSize bytes of each
// sufficiently large file and sets the executable bits (0111) on those
// that isExecutable recognizes.
func (c *Container) FixPermissions(pool wsync.Pool) error {
	defer pool.Close()

	// one scan buffer, reused across all files
	buf := make([]byte, minScannedFileSize)
	for index, f := range c.Files {
		if f.Size < minScannedFileSize {
			continue
		}

		r, err := pool.GetReader(int64(index))
		if err != nil {
			return errors.Wrap(err, 1)
		}

		_, err = io.ReadFull(r, buf)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		if isExecutable(buf) {
			f.Mode |= 0111
		}
	}

	return nil
}
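For reference, a caller would pair FixPermissions with a disk-backed pool. A minimal sketch, assuming wharf's fspool.New(container, dir) constructor (an assumption; the snippet above only guarantees the wsync.Pool interface):

func fixTreePermissions(container *tlc.Container, dir string) error {
	// fspool is assumed to serve file contents straight from dir on disk
	return container.FixPermissions(fspool.New(container, dir))
}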
Example #2
// healOne streams a single file from sourcePool into targetPool,
// reporting incremental progress through chunkHealed.
func (ah *ArchiveHealer) healOne(sourcePool wsync.Pool, targetPool wsync.WritablePool, fileIndex int64, chunkHealed chunkHealedFunc) error {
	var err error
	var reader io.Reader
	var writer io.WriteCloser

	reader, err = sourcePool.GetReader(fileIndex)
	if err != nil {
		return err
	}

	writer, err = targetPool.GetWriter(fileIndex)
	if err != nil {
		return err
	}

	// the counter reports cumulative totals; remember the previous total
	// so chunkHealed only receives the bytes written since the last call
	lastCount := int64(0)
	cw := counter.NewWriterCallback(func(count int64) {
		chunk := count - lastCount
		chunkHealed(chunk)
		lastCount = count
	}, writer)

	_, err = io.Copy(cw, reader)
	if err != nil {
		return err
	}

	err = writer.Close()
	if err != nil {
		return err
	}

	return nil
}
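The lastCount bookkeeping above exists because counter.NewWriterCallback reports a cumulative byte count after every write, while chunkHealed expects per-write deltas. The same conversion in isolation, relying only on the callback shape visible above (copyWithDeltas is a hypothetical helper):

func copyWithDeltas(dst io.Writer, src io.Reader, onChunk func(delta int64)) (int64, error) {
	lastCount := int64(0)
	cw := counter.NewWriterCallback(func(count int64) {
		// count is cumulative; forward only the newly written bytes
		onChunk(count - lastCount)
		lastCount = count
	}, dst)
	return io.Copy(cw, src)
}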
Example #3
// heal consumes file indices from fileIndices and heals each one from the
// zip-backed source pool, until the channel is closed, an error occurs, or
// cancelled is closed.
func (ah *ArchiveHealer) heal(container *tlc.Container, zipReader *zip.Reader, zipSize int64,
	targetPool wsync.WritablePool,
	fileIndices chan int64, errs chan error, done chan bool, cancelled chan struct{}, chunkHealed chunkHealedFunc) {

	var sourcePool wsync.Pool
	var err error

	sourcePool = zippool.New(container, zipReader)
	defer sourcePool.Close()

	for {
		select {
		case <-cancelled:
			// something else stopped the healing
			return
		case fileIndex, ok := <-fileIndices:
			if !ok {
				// no more files to heal
				done <- true
				return
			}

			err = ah.healOne(sourcePool, targetPool, fileIndex, chunkHealed)
			if err != nil {
				select {
				case <-cancelled:
					// already cancelled, no need for more errors
					return
				case errs <- err:
					return
				}
			}
		}
	}
}
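The channel protocol implies a driver on the other side: spawn workers, feed indices, close the channel, then wait for one done per worker or the first error. A hedged sketch of such a driver (healAll, indices and numWorkers are hypothetical; the library's actual orchestration may differ):

func healAll(ah *ArchiveHealer, container *tlc.Container, zipReader *zip.Reader, zipSize int64,
	targetPool wsync.WritablePool, indices []int64, numWorkers int, chunkHealed chunkHealedFunc) error {

	fileIndices := make(chan int64)
	errs := make(chan error)
	done := make(chan bool, numWorkers) // buffered so finished workers never block
	cancelled := make(chan struct{})

	for i := 0; i < numWorkers; i++ {
		go ah.heal(container, zipReader, zipSize, targetPool, fileIndices, errs, done, cancelled, chunkHealed)
	}

	go func() {
		for _, fileIndex := range indices {
			select {
			case fileIndices <- fileIndex:
			case <-cancelled:
				return // healing was aborted, stop feeding
			}
		}
		close(fileIndices) // workers send done once the channel is drained
	}()

	for i := 0; i < numWorkers; i++ {
		select {
		case err := <-errs:
			close(cancelled) // tell the remaining workers to stop
			return err
		case <-done:
			// one worker exited cleanly
		}
	}
	return nil
}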
Example #4
// CopyContainer copies from one container to the other. Combined with fspool
// and blockpool, it can be used to split a container into blocks or join it back
// into regular files.
func CopyContainer(container *tlc.Container, outPool wsync.WritablePool, inPool wsync.Pool, consumer *state.Consumer) error {
	copyFile := func(byteOffset int64, fileIndex int64) error {
		r, err := inPool.GetReader(fileIndex)
		if err != nil {
			return err
		}

		w, err := outPool.GetWriter(fileIndex)
		if err != nil {
			return err
		}

		// overall progress: bytes of files already copied, plus bytes
		// copied so far within this file, over the total container size
		cw := counter.NewWriterCallback(func(count int64) {
			alpha := float64(byteOffset+count) / float64(container.Size)
			consumer.Progress(alpha)
		}, w)

		_, err = io.Copy(cw, r)
		if err != nil {
			return err
		}

		err = w.Close()
		if err != nil {
			return err
		}

		return nil
	}

	byteOffset := int64(0)

	for fileIndex, f := range container.Files {
		consumer.ProgressLabel(f.Path)

		err := copyFile(byteOffset, int64(fileIndex))
		if err != nil {
			return err
		}

		byteOffset += f.Size
	}

	return nil
}
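Following the doc comment, a sketch that joins a zip archive back into regular files on disk. zippool.New appears in Example #3; treating fspool's pool as a wsync.WritablePool is an assumption:

func extractZip(container *tlc.Container, zipReader *zip.Reader, outDir string, consumer *state.Consumer) error {
	// read side: contents served from the zip archive
	inPool := zippool.New(container, zipReader)
	defer inPool.Close()

	// write side: fspool.New(container, outDir) is assumed to satisfy
	// wsync.WritablePool, writing files under outDir
	return CopyContainer(container, fspool.New(container, outDir), inPool, consumer)
}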
Example #5
// ComputeSignatureToWriter is a variant of ComputeSignature that writes
// hashes to a callback.
func ComputeSignatureToWriter(container *tlc.Container, pool wsync.Pool, consumer *state.Consumer, sigWriter wsync.SignatureWriter) (err error) {

	// err is a named return so this deferred Close can report its failure
	defer func() {
		if pErr := pool.Close(); pErr != nil && err == nil {
			err = errors.Wrap(pErr, 1)
		}
	}()

	sctx := mksync()

	totalBytes := container.Size
	fileOffset := int64(0)

	onRead := func(count int64) {
		consumer.Progress(float64(fileOffset+count) / float64(totalBytes))
	}

	for fileIndex, f := range container.Files {
		consumer.ProgressLabel(f.Path)
		fileOffset = f.Offset

		var reader io.Reader
		reader, err = pool.GetReader(int64(fileIndex))
		if err != nil {
			return errors.Wrap(err, 1)
		}

		cr := counter.NewReaderCallback(onRead, reader)
		err = sctx.CreateSignature(int64(fileIndex), cr, sigWriter)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	return nil
}
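Note the named err return: without it, the assignment inside the deferred function would be lost and a pool.Close failure on the happy path would go unreported. The idiom in isolation (closeAndKeepError is a hypothetical helper):

func closeAndKeepError(c io.Closer) (err error) {
	defer func() {
		// a deferred function may overwrite a named return value,
		// so the Close error is not silently dropped
		if cErr := c.Close(); cErr != nil && err == nil {
			err = cErr
		}
	}()

	// ... main work goes here; its error, if any, takes priority
	return nil
}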
Example #6
// CompressZip archives the container's dirs, files and symlinks into a zip
// written to archiveWriter, reading file contents from pool. The err return
// is named so the deferred close below can report its failure.
func CompressZip(archiveWriter io.Writer, container *tlc.Container, pool wsync.Pool, consumer *state.Consumer) (res *archiver.CompressResult, err error) {
	var uncompressedSize int64
	var compressedSize int64

	archiveCounter := counter.NewWriter(archiveWriter)

	zipWriter := zip.NewWriter(archiveCounter)
	// close the zip writer on early returns; the happy path closes it
	// explicitly below and sets zipWriter to nil, making this a no-op
	defer func() {
		if zipWriter != nil {
			if zErr := zipWriter.Close(); err == nil && zErr != nil {
				err = errors.Wrap(zErr, 1)
			}
		}
	}()

	for _, dir := range container.Dirs {
		fh := zip.FileHeader{
			Name: dir.Path + "/",
		}
		fh.SetMode(os.FileMode(dir.Mode))
		fh.SetModTime(time.Now())

		_, hErr := zipWriter.CreateHeader(&fh)
		if hErr != nil {
			return nil, errors.Wrap(hErr, 1)
		}
	}

	for fileIndex, file := range container.Files {
		fh := zip.FileHeader{
			Name:               file.Path,
			UncompressedSize64: uint64(file.Size),
			Method:             zip.Deflate,
		}
		fh.SetMode(os.FileMode(file.Mode))
		fh.SetModTime(time.Now())

		entryWriter, eErr := zipWriter.CreateHeader(&fh)
		if eErr != nil {
			return nil, errors.Wrap(eErr, 1)
		}

		entryReader, eErr := pool.GetReader(int64(fileIndex))
		if eErr != nil {
			return nil, errors.Wrap(eErr, 1)
		}

		copiedBytes, eErr := io.Copy(entryWriter, entryReader)
		if eErr != nil {
			return nil, errors.Wrap(eErr, 1)
		}

		uncompressedSize += copiedBytes
	}

	for _, symlink := range container.Symlinks {
		fh := zip.FileHeader{
			Name: symlink.Path,
		}
		fh.SetMode(os.FileMode(symlink.Mode))

		entryWriter, eErr := zipWriter.CreateHeader(&fh)
		if eErr != nil {
			return nil, errors.Wrap(eErr, 1)
		}

		_, wErr := entryWriter.Write([]byte(symlink.Dest))
		if wErr != nil {
			return nil, errors.Wrap(wErr, 1)
		}
	}

	err = zipWriter.Close()
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}
	zipWriter = nil

	compressedSize = archiveCounter.Count()

	return &archiver.CompressResult{
		UncompressedSize: uncompressedSize,
		CompressedSize:   compressedSize,
	}, nil
}
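A possible caller, writing the archive to a file on disk (compressToFile is illustrative, not part of the library):

func compressToFile(path string, container *tlc.Container, pool wsync.Pool, consumer *state.Consumer) (*archiver.CompressResult, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	// a production version would propagate f.Close()'s error, e.g. with
	// the named-return idiom shown after Example #5
	defer f.Close()

	return CompressZip(f, container, pool, consumer)
}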