// healOne heals a single file: it streams the pristine data for fileIndex from
// sourcePool into targetPool, reporting progress through chunkHealed as bytes
// are written.
func (ah *ArchiveHealer) healOne(sourcePool wsync.Pool, targetPool wsync.WritablePool, fileIndex int64, chunkHealed chunkHealedFunc) error {
	var err error
	var reader io.Reader
	var writer io.WriteCloser

	reader, err = sourcePool.GetReader(fileIndex)
	if err != nil {
		return err
	}

	writer, err = targetPool.GetWriter(fileIndex)
	if err != nil {
		return err
	}

	lastCount := int64(0)
	cw := counter.NewWriterCallback(func(count int64) {
		// report only the delta written since the last callback
		chunk := count - lastCount
		chunkHealed(chunk)
		lastCount = count
	}, writer)

	_, err = io.Copy(cw, reader)
	if err != nil {
		return err
	}

	err = writer.Close()
	if err != nil {
		return err
	}

	return nil
}
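// A minimal sketch of a chunkHealed callback, assuming chunkHealedFunc receives
// the number of bytes written since the previous callback (which is what
// healOne's lastCount bookkeeping produces). Summing the deltas yields the
// total number of bytes healed; the atomic add keeps the total safe if several
// files are healed concurrently. onChunkHealed and totalHealed are illustrative
// names, not part of the original code.
package main

import (
	"fmt"
	"sync/atomic"
)

var totalHealed int64

func onChunkHealed(chunk int64) {
	// chunk is a delta, so the running sum is the total healed so far.
	atomic.AddInt64(&totalHealed, chunk)
}

func main() {
	// Simulate three progress callbacks of 16 KiB each.
	for i := 0; i < 3; i++ {
		onChunkHealed(16 * 1024)
	}
	fmt.Printf("healed %d bytes so far\n", atomic.LoadInt64(&totalHealed))
}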
// CopyContainer copies from one container to the other. Combined with fspool
// and blockpool, it can be used to split a container into blocks or join it
// back into regular files.
func CopyContainer(container *tlc.Container, outPool wsync.WritablePool, inPool wsync.Pool, consumer *state.Consumer) error {
	copyFile := func(byteOffset int64, fileIndex int64) error {
		r, err := inPool.GetReader(fileIndex)
		if err != nil {
			return err
		}

		w, err := outPool.GetWriter(fileIndex)
		if err != nil {
			return err
		}

		cw := counter.NewWriterCallback(func(count int64) {
			alpha := float64(byteOffset+count) / float64(container.Size)
			consumer.Progress(alpha)
		}, w)

		_, err = io.Copy(cw, r)
		if err != nil {
			return err
		}

		err = w.Close()
		if err != nil {
			return err
		}

		return nil
	}

	byteOffset := int64(0)

	for fileIndex, f := range container.Files {
		consumer.ProgressLabel(f.Path)

		err := copyFile(byteOffset, int64(fileIndex))
		if err != nil {
			return err
		}

		byteOffset += f.Size
	}

	return nil
}
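// A standalone sketch of the progress math CopyContainer relies on: byteOffset
// is the combined size of files already copied, count is the number of bytes
// written into the current file so far, and the fraction reported to the
// consumer is (byteOffset + count) / container.Size. The file sizes below are
// made up purely for illustration.
package main

import "fmt"

func main() {
	fileSizes := []int64{1024, 4096, 2048} // hypothetical container contents
	var containerSize int64
	for _, size := range fileSizes {
		containerSize += size
	}

	byteOffset := int64(0)
	for i, size := range fileSizes {
		count := size / 2 // pretend the current file is half copied
		alpha := float64(byteOffset+count) / float64(containerSize)
		fmt.Printf("file %d: overall progress %.2f\n", i, alpha)
		byteOffset += size
	}
}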
// lazilyPatchFile consumes the patch operations for a single output file. It
// only opens a writer once it knows the file is not a pure transposition of a
// target file; otherwise it records the transposition and writes nothing.
func (actx *ApplyContext) lazilyPatchFile(sctx *wsync.Context, targetContainer *tlc.Container, targetPool wsync.Pool, outputContainer *tlc.Container, outputPool wsync.WritablePool,
	fileIndex int64, onSourceWrite counter.CountCallback, ops chan wsync.Operation, inplace bool) (written int64, transposition *Transposition, err error) {

	var writer io.WriteCloser

	defer func() {
		if writer != nil {
			cErr := writer.Close()
			if cErr != nil && err == nil {
				err = cErr
			}
		}
	}()

	var realops chan wsync.Operation

	errs := make(chan error, 1)
	first := true

	for op := range ops {
		if first {
			first = false

			// if the first operation is a block range that copies an entire
			// target file into an output file of the same size, applying the
			// patch for this file is a no-op: the output can simply reuse
			// (transpose) the target file.
			if inplace && op.Type == wsync.OpBlockRange && op.BlockIndex == 0 {
				outputFile := outputContainer.Files[fileIndex]
				targetFile := targetContainer.Files[op.FileIndex]
				numOutputBlocks := ComputeNumBlocks(outputFile.Size)

				if op.BlockSpan == numOutputBlocks &&
					outputFile.Size == targetFile.Size {
					transposition = &Transposition{
						TargetPath: targetFile.Path,
						OutputPath: outputFile.Path,
					}
				}
			}

			if transposition != nil {
				errs <- nil
			} else {
				realops = make(chan wsync.Operation)

				writer, err = outputPool.GetWriter(fileIndex)
				if err != nil {
					return 0, nil, errors.Wrap(err, 1)
				}
				writeCounter := counter.NewWriterCallback(onSourceWrite, writer)

				go func() {
					failFast := true
					if actx.WoundsConsumer != nil {
						failFast = false
					}

					applyErr := sctx.ApplyPatchFull(writeCounter, targetPool, realops, failFast)
					if applyErr != nil {
						errs <- applyErr
						return
					}

					written = writeCounter.Count()
					errs <- nil
				}()
			}
		}

		// if not a transposition, relay operations (or bail out on error)
		if transposition == nil {
			select {
			case cErr := <-errs:
				// if we get an error here, ApplyPatchFull failed, so we no
				// longer need to close realops
				if cErr != nil {
					return 0, nil, errors.Wrap(cErr, 1)
				}
			case realops <- op:
				// op relayed, nothing else to do
			}
		}
	}

	if transposition == nil {
		// realops may be nil if the file was empty (0 ops)
		if realops != nil {
			close(realops)
		} else {
			// if we had 0 ops, signal that no errors occurred
			errs <- nil
		}
	}

	err = <-errs
	if err != nil {
		return 0, nil, errors.Wrap(err, 1)
	}

	return
}
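// A minimal sketch of the transposition (no-op) check above. It assumes files
// are split into fixed-size blocks; the 64 KiB constant and the helper names
// below are assumptions for illustration, not the library's actual definitions.
// If a file's very first operation is a single OpBlockRange starting at block 0
// and spanning every block of a target file of identical size, the output would
// be byte-for-byte the target file, so it can be reused instead of rewritten.
package main

import "fmt"

const blockSize = 64 * 1024 // assumed block size

// computeNumBlocks rounds up: a trailing partial block still counts as one.
func computeNumBlocks(fileSize int64) int64 {
	return (fileSize + blockSize - 1) / blockSize
}

type blockRangeOp struct {
	blockIndex int64 // first target block to copy
	blockSpan  int64 // number of consecutive blocks to copy
}

func isWholeFileCopy(op blockRangeOp, outputSize, targetSize int64) bool {
	return op.blockIndex == 0 &&
		op.blockSpan == computeNumBlocks(outputSize) &&
		outputSize == targetSize
}

func main() {
	size := int64(3 * blockSize) // hypothetical 192 KiB file
	op := blockRangeOp{blockIndex: 0, blockSpan: 3}
	fmt.Println(isWholeFileCopy(op, size, size)) // true: the file is unchanged
}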