func appendAllToFile(src io.Reader, dest string, existingBytes int64, totalBytes int64) error {
	out, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		return errors.Wrap(err, 1)
	}
	defer out.Close()

	prevPercent := 0.0
	comm.StartProgress()

	onWrite := func(bytesDownloaded int64) {
		bytesWritten := existingBytes + bytesDownloaded
		percent := float64(bytesWritten) / float64(totalBytes)
		if math.Abs(percent-prevPercent) < 0.0001 {
			// skip negligible progress updates
			return
		}

		prevPercent = percent
		comm.Progress(percent)
	}
	cw := counter.NewWriterCallback(onWrite, out)

	_, err = io.Copy(cw, src)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.EndProgress()
	return nil
}
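// Illustrative sketch (not part of the codebase): the counter.NewWriterCallback
// pattern used throughout these functions. The callback receives the cumulative
// number of bytes written through the wrapper, and the caller converts that into
// a progress fraction. Assumes counter is github.com/itchio/wharf/counter and
// that the standard library packages bytes, fmt, io and strings are imported;
// progressDemo itself is a hypothetical name.
func progressDemo() error {
	src := strings.NewReader(strings.Repeat("x", 1<<20)) // 1 MiB of dummy data
	var dst bytes.Buffer
	total := int64(src.Len())

	// every Write to cw is forwarded to dst, then the callback receives the
	// cumulative count of bytes written so far
	cw := counter.NewWriterCallback(func(count int64) {
		fmt.Printf("progress: %.1f%%\n", float64(count)/float64(total)*100)
	}, &dst)

	_, err := io.Copy(cw, src)
	return err
}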
func (ah *ArchiveHealer) healOne(sourcePool wsync.Pool, targetPool wsync.WritablePool, fileIndex int64, chunkHealed chunkHealedFunc) error {
	var err error
	var reader io.Reader
	var writer io.WriteCloser

	reader, err = sourcePool.GetReader(fileIndex)
	if err != nil {
		return err
	}

	writer, err = targetPool.GetWriter(fileIndex)
	if err != nil {
		return err
	}

	lastCount := int64(0)
	cw := counter.NewWriterCallback(func(count int64) {
		// counter reports cumulative bytes written; forward only the delta
		chunk := count - lastCount
		chunkHealed(chunk)
		lastCount = count
	}, writer)

	_, err = io.Copy(cw, reader)
	if err != nil {
		return err
	}

	err = writer.Close()
	if err != nil {
		return err
	}

	return nil
}
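// Illustrative sketch (not part of the codebase): unlike appendAllToFile above,
// healOne reports per-chunk deltas rather than absolute counts. The conversion
// can be factored out as below; deltaWriter and onChunk are hypothetical names.
func deltaWriter(w io.Writer, onChunk func(delta int64)) io.Writer {
	last := int64(0)
	return counter.NewWriterCallback(func(count int64) {
		// count is cumulative; report only what was written since the last call
		onChunk(count - last)
		last = count
	}, w)
}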
// CopyContainer copies from one container to the other. Combined with fspool
// and blockpool, it can be used to split a container into blocks or join it back
// into regular files.
func CopyContainer(container *tlc.Container, outPool wsync.WritablePool, inPool wsync.Pool, consumer *state.Consumer) error {
	copyFile := func(byteOffset int64, fileIndex int64) error {
		r, err := inPool.GetReader(fileIndex)
		if err != nil {
			return err
		}

		w, err := outPool.GetWriter(fileIndex)
		if err != nil {
			return err
		}

		cw := counter.NewWriterCallback(func(count int64) {
			alpha := float64(byteOffset+count) / float64(container.Size)
			consumer.Progress(alpha)
		}, w)

		_, err = io.Copy(cw, r)
		if err != nil {
			return err
		}

		err = w.Close()
		if err != nil {
			return err
		}

		return nil
	}

	byteOffset := int64(0)

	for fileIndex, f := range container.Files {
		consumer.ProgressLabel(f.Path)

		err := copyFile(byteOffset, int64(fileIndex))
		if err != nil {
			return err
		}

		byteOffset += f.Size
	}

	return nil
}
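// Illustrative sketch (not part of the codebase): driving CopyContainer with a
// logging consumer. The container and pools are assumed to be built elsewhere
// (the doc comment above mentions fspool and blockpool); the state.Consumer
// field names OnProgress and OnProgressLabel are assumptions based on how
// consumer.Progress and consumer.ProgressLabel are called above. Assumes the
// standard library log package is imported; copyWithLogging is a hypothetical name.
func copyWithLogging(container *tlc.Container, inPool wsync.Pool, outPool wsync.WritablePool) error {
	consumer := &state.Consumer{
		OnProgressLabel: func(label string) {
			log.Printf("copying %s", label)
		},
		OnProgress: func(alpha float64) {
			log.Printf("overall progress: %.1f%%", alpha*100)
		},
	}
	return CopyContainer(container, outPool, inPool, consumer)
}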
func NewResumableUpload(uploadURL string, done chan bool, errs chan error, settings ResumableUploadSettings) (*ResumableUpload, error) {
	ru := &ResumableUpload{}
	ru.MaxChunkGroup = settings.MaxChunkGroup
	if ru.MaxChunkGroup == 0 {
		ru.MaxChunkGroup = 64
	}
	ru.uploadURL = uploadURL
	ru.id = seed
	seed++
	ru.consumer = settings.Consumer
	ru.httpClient = timeout.NewClient(resumableConnectTimeout, resumableIdleTimeout)

	// data flows: caller -> writeCounter -> bufferedWriter -> pipeW;
	// uploadChunks reads from the other end of the pipe (pipeR)
	pipeR, pipeW := io.Pipe()
	ru.pipeWriter = pipeW

	bufferSize := settings.BufferSize
	if bufferSize == 0 {
		bufferSize = 32 * 1024 * 1024
	}

	bufferedWriter := bufio.NewWriterSize(pipeW, bufferSize)
	ru.bufferedWriter = bufferedWriter

	onWrite := func(count int64) {
		// ru.Debugf("onwrite %d", count)
		ru.TotalBytes = count
		if ru.OnProgress != nil {
			ru.OnProgress()
		}
	}
	ru.writeCounter = counter.NewWriterCallback(onWrite, bufferedWriter)

	go ru.uploadChunks(pipeR, done, errs)

	return ru, nil
}
func doCp(srcPath string, destPath string, resume bool) error {
	src, err := eos.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dir := filepath.Dir(destPath)
	err = os.MkdirAll(dir, 0755)
	if err != nil {
		return err
	}

	flags := os.O_CREATE | os.O_WRONLY
	dest, err := os.OpenFile(destPath, flags, 0644)
	if err != nil {
		return err
	}
	defer dest.Close()

	stats, err := src.Stat()
	if err != nil {
		return err
	}

	totalBytes := int64(stats.Size())
	startOffset := int64(0)

	if resume {
		startOffset, err = dest.Seek(0, io.SeekEnd)
		if err != nil {
			return err
		}

		if startOffset == 0 {
			comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
		} else if startOffset > totalBytes {
			comm.Logf("Existing data too big (%s > %s), starting over",
				humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))
		} else if startOffset == totalBytes {
			comm.Logf("All %s already there", humanize.IBytes(uint64(totalBytes)))
			return nil
		}

		comm.Logf("Resuming at %s / %s",
			humanize.IBytes(uint64(startOffset)), humanize.IBytes(uint64(totalBytes)))

		_, err = src.Seek(startOffset, io.SeekStart)
		if err != nil {
			return err
		}
	} else {
		comm.Logf("Downloading %s", humanize.IBytes(uint64(totalBytes)))
	}

	start := time.Now()

	comm.Progress(float64(startOffset) / float64(totalBytes))
	comm.StartProgressWithTotalBytes(totalBytes)

	cw := counter.NewWriterCallback(func(count int64) {
		alpha := float64(startOffset+count) / float64(totalBytes)
		comm.Progress(alpha)
	}, dest)

	copiedBytes, err := io.Copy(cw, src)
	if err != nil {
		return err
	}

	comm.EndProgress()

	totalDuration := time.Since(start)
	prettyStartOffset := humanize.IBytes(uint64(startOffset))
	prettySize := humanize.IBytes(uint64(copiedBytes))
	perSecond := humanize.IBytes(uint64(float64(totalBytes-startOffset) / totalDuration.Seconds()))
	comm.Statf("%s + %s copied @ %s/s\n", prettyStartOffset, prettySize, perSecond)

	return nil
}
func (actx *ApplyContext) lazilyPatchFile(sctx *wsync.Context, targetContainer *tlc.Container, targetPool wsync.Pool, outputContainer *tlc.Container, outputPool wsync.WritablePool,
	fileIndex int64, onSourceWrite counter.CountCallback, ops chan wsync.Operation, inplace bool) (written int64, transposition *Transposition, err error) {

	var writer io.WriteCloser

	defer func() {
		if writer != nil {
			cErr := writer.Close()
			if cErr != nil && err == nil {
				err = cErr
			}
		}
	}()

	var realops chan wsync.Operation

	errs := make(chan error, 1)
	first := true

	for op := range ops {
		if first {
			first = false

			// if the first operation is a blockrange that copies an
			// entire file from target into a file from source that has
			// the same name and size, then it's a no-op!
			if inplace && op.Type == wsync.OpBlockRange && op.BlockIndex == 0 {
				outputFile := outputContainer.Files[fileIndex]
				targetFile := targetContainer.Files[op.FileIndex]
				numOutputBlocks := ComputeNumBlocks(outputFile.Size)

				if op.BlockSpan == numOutputBlocks &&
					outputFile.Size == targetFile.Size {
					transposition = &Transposition{
						TargetPath: targetFile.Path,
						OutputPath: outputFile.Path,
					}
				}
			}

			if transposition != nil {
				errs <- nil
			} else {
				realops = make(chan wsync.Operation)

				writer, err = outputPool.GetWriter(fileIndex)
				if err != nil {
					return 0, nil, errors.Wrap(err, 1)
				}

				writeCounter := counter.NewWriterCallback(onSourceWrite, writer)

				go func() {
					failFast := true
					if actx.WoundsConsumer != nil {
						failFast = false
					}

					applyErr := sctx.ApplyPatchFull(writeCounter, targetPool, realops, failFast)
					if applyErr != nil {
						errs <- applyErr
						return
					}

					written = writeCounter.Count()
					errs <- nil
				}()
			}
		}

		// if not a transposition, relay ops and watch for errors
		if transposition == nil {
			select {
			case cErr := <-errs:
				// if we get an error here, ApplyPatch failed so we no longer need to close realops
				if cErr != nil {
					return 0, nil, errors.Wrap(cErr, 1)
				}
			case realops <- op:
				// muffin
			}
		}
	}

	if transposition == nil {
		// realops may be nil if the file was empty (0 ops)
		if realops != nil {
			close(realops)
		} else {
			// if we had 0 ops, signal no errors occurred
			errs <- nil
		}
	}

	err = <-errs
	if err != nil {
		return 0, nil, errors.Wrap(err, 1)
	}

	return
}
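// Illustrative sketch (not part of the codebase): the transposition check above
// boils down to this predicate. A file whose first operation is a block range
// starting at block 0, spanning every block of the output, with matching output
// and target sizes, can be "transposed" (renamed or left in place) instead of
// being rewritten. isWholeFileCopy is a hypothetical helper name; everything it
// references appears in lazilyPatchFile above.
func isWholeFileCopy(op wsync.Operation, outputSize int64, targetSize int64) bool {
	return op.Type == wsync.OpBlockRange &&
		op.BlockIndex == 0 &&
		op.BlockSpan == ComputeNumBlocks(outputSize) &&
		outputSize == targetSize
}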
func (vctx *ValidatorContext) validate(target string, signature *SignatureInfo, fileIndices chan int64,
	errs chan error, onProgress onProgressFunc, cancelled chan struct{}) {

	var retErr error

	targetPool, err := pools.New(signature.Container, target)
	if err != nil {
		errs <- err
		return
	}

	defer func() {
		// always send exactly one result, even if closing the pool fails
		cErr := targetPool.Close()
		if cErr != nil && retErr == nil {
			retErr = errors.Wrap(cErr, 1)
		}

		errs <- retErr
	}()

	aggregateOut := make(chan *Wound)
	relayDone := make(chan bool)
	go func() {
		for w := range aggregateOut {
			vctx.Wounds <- w
		}
		relayDone <- true
	}()

	wounds := AggregateWounds(aggregateOut, MaxWoundSize)
	defer func() {
		// signal no more wounds are going to be sent
		close(wounds)
		// wait for all of them to be relayed
		<-relayDone
	}()

	validatingPool := &ValidatingPool{
		Pool:      nullpool.New(signature.Container),
		Container: signature.Container,
		Signature: signature,
		Wounds:    wounds,
	}

	doOne := func(fileIndex int64) error {
		file := signature.Container.Files[fileIndex]

		var reader io.Reader
		reader, err = targetPool.GetReader(fileIndex)
		if err != nil {
			if os.IsNotExist(err) {
				// whole file is missing
				wound := &Wound{
					Kind:  WoundKind_FILE,
					Index: fileIndex,
					Start: 0,
					End:   file.Size,
				}

				onProgress(file.Size)

				select {
				case wounds <- wound:
				case <-cancelled:
				}
				return nil
			}
			return err
		}

		var writer io.WriteCloser
		writer, err = validatingPool.GetWriter(fileIndex)
		if err != nil {
			return err
		}
		defer writer.Close()

		lastCount := int64(0)
		countingWriter := counter.NewWriterCallback(func(count int64) {
			delta := count - lastCount
			onProgress(delta)
			lastCount = count
		}, writer)

		var writtenBytes int64
		writtenBytes, err = io.Copy(countingWriter, reader)
		if err != nil {
			return err
		}

		if writtenBytes != file.Size {
			// file is shorter than expected: report the rest as a wound
			onProgress(file.Size - writtenBytes)
			wound := &Wound{
				Kind:  WoundKind_FILE,
				Index: fileIndex,
				Start: writtenBytes,
				End:   file.Size,
			}

			select {
			case wounds <- wound:
			case <-cancelled:
			}
		}

		return nil
	}

	for {
		select {
		case fileIndex, ok := <-fileIndices:
			if !ok {
				// no more work
				return
			}

			err := doOne(fileIndex)
			if err != nil {
				if retErr == nil {
					retErr = err
				}
				return
			}
		case <-cancelled:
			// cancelled
			return
		}
	}
}
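// Illustrative sketch (not part of the codebase): validate is written to run as
// one of several workers. A hypothetical driver feeds file indices, closes the
// channel when done, and collects exactly one (possibly nil) error per worker
// from the buffered errs channel. Worker count, runValidators and the
// surrounding wiring are assumptions; the real ValidatorContext may differ, and
// a fuller version would close(cancelled) or stop feeding once an error occurs.
func runValidators(vctx *ValidatorContext, target string, signature *SignatureInfo, numWorkers int, onProgress onProgressFunc) error {
	fileIndices := make(chan int64)
	errs := make(chan error, numWorkers)
	cancelled := make(chan struct{})

	for i := 0; i < numWorkers; i++ {
		go vctx.validate(target, signature, fileIndices, errs, onProgress, cancelled)
	}

	// feed every file index, then signal that no more work is coming
	for fileIndex := range signature.Container.Files {
		fileIndices <- int64(fileIndex)
	}
	close(fileIndices)

	// collect one result per worker, keeping the first error
	var firstErr error
	for i := 0; i < numWorkers; i++ {
		if err := <-errs; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}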