Example #1
0
// ComputeSignatureToWriter is a variant of ComputeSignature that writes hashes
// to a callback
// ComputeSignatureToWriter is a variant of ComputeSignature that writes hashes
// to a callback.
//
// The result is named so that the deferred pool.Close() can report a close
// error when the body itself succeeded; with a plain local variable that
// assignment would be lost (the return value is evaluated before defers run).
func ComputeSignatureToWriter(container *tlc.Container, pool wsync.Pool, consumer *state.Consumer, sigWriter wsync.SignatureWriter) (err error) {
	defer func() {
		// only surface the close error if nothing else went wrong
		if pErr := pool.Close(); pErr != nil && err == nil {
			err = errors.Wrap(pErr, 1)
		}
	}()

	sctx := mksync()

	totalBytes := container.Size
	fileOffset := int64(0)

	// progress is reported relative to the whole container, so the per-file
	// read count is offset by the current file's position in the container
	onRead := func(count int64) {
		consumer.Progress(float64(fileOffset+count) / float64(totalBytes))
	}

	for fileIndex, f := range container.Files {
		consumer.ProgressLabel(f.Path)
		fileOffset = f.Offset

		var reader io.Reader
		reader, err = pool.GetReader(int64(fileIndex))
		if err != nil {
			return errors.Wrap(err, 1)
		}

		cr := counter.NewReaderCallback(onRead, reader)
		err = sctx.CreateSignature(int64(fileIndex), cr, sigWriter)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	return nil
}
Example #2
0
// trySendBytes uploads one chunk of buf starting at the given absolute offset,
// following Google Cloud Storage's resumable-upload protocol. isLast marks the
// final chunk, which lets the server know the total object size.
//
// Returns nil on success, a *netError for transport/protocol failures, or a
// *retryError (carrying the number of committed bytes) when the chunk should
// be re-sent.
func (ru *ResumableUpload) trySendBytes(buf []byte, offset int64, isLast bool) error {
	buflen := int64(len(buf))
	ru.Debugf("uploading chunk of %d bytes", buflen)

	body := bytes.NewReader(buf)
	// track upload progress as the request body is consumed
	countingReader := counter.NewReaderCallback(func(count int64) {
		ru.UploadedBytes = offset + count
		if ru.OnProgress != nil {
			ru.OnProgress()
		}
	}, body)

	req, err := http.NewRequest("PUT", ru.uploadURL, countingReader)
	if err != nil {
		// does not include HTTP errors, more like golang API usage errors
		return errors.Wrap(err, 1)
	}

	start := offset
	end := start + buflen - 1
	// "/*" means total size not yet known
	contentRange := fmt.Sprintf("bytes %d-%d/*", offset, end)

	if isLast {
		// on the final chunk the total object size is known
		contentRange = fmt.Sprintf("bytes %d-%d/%d", offset, end, offset+buflen)
	}

	req.Header.Set("content-range", contentRange)
	req.ContentLength = buflen
	ru.Debugf("uploading %d-%d, last? %v, content-length set to %d", start, end, isLast, req.ContentLength)

	startTime := time.Now()

	res, err := ru.httpClient.Do(req)
	if err != nil {
		ru.Debugf("while uploading %d-%d: \n%s", start, end, err.Error())
		return &netError{err, GcsUnknown}
	}
	// we only inspect status & headers; close the body so the underlying
	// connection can be reused (receiver is evaluated now, so this closes
	// the original response even though res is reassigned below)
	defer res.Body.Close()

	ru.Debugf("server replied in %s, with status %s", time.Since(startTime), res.Status)
	for k, v := range res.Header {
		ru.Debugf("[Reply header] %s: %s", k, v)
	}

	if buflen != int64(len(buf)) {
		// see https://github.com/itchio/butler/issues/71#issuecomment-243081797
		return &netError{fmt.Errorf("send buffer size changed while we were uploading"), GcsResume}
	}

	status := interpretGcsStatusCode(res.StatusCode)
	if status == GcsUploadComplete && isLast {
		ru.Debugf("upload complete!")
		return nil
	}

	if status == GcsNeedQuery {
		ru.Debugf("need to query upload status (HTTP %s)", res.Status)
		statusRes, err := ru.queryStatus()
		if err != nil {
			// this happens after we retry the query a few times
			return err
		}
		defer statusRes.Body.Close()

		if statusRes.StatusCode == 308 {
			ru.Debugf("got upload status, trying to resume")
			res = statusRes
			status = GcsResume
		} else {
			status = interpretGcsStatusCode(statusRes.StatusCode)
			err = fmt.Errorf("expected upload status, got HTTP %s (%s) instead", statusRes.Status, status)
			ru.Debugf("%s", err)
			return err
		}
	}

	if status == GcsResume {
		expectedOffset := offset + buflen
		rangeHeader := res.Header.Get("Range")
		if rangeHeader == "" {
			// no Range header means the server committed nothing
			ru.Debugf("commit failed (null range), retrying")
			return &retryError{committedBytes: 0}
		}

		committedRange, err := parseRangeHeader(rangeHeader)
		if err != nil {
			return err
		}

		ru.Debugf("got resume, expectedOffset: %d, committedRange: %s", expectedOffset, committedRange)
		if committedRange.start != 0 {
			return fmt.Errorf("upload failed: beginning not committed somehow (committed range: %s)", committedRange)
		}

		if committedRange.end == expectedOffset {
			ru.Debugf("commit succeeded (%d blocks stored)", buflen/gcsChunkSize)
			return nil
		} else {
			committedBytes := committedRange.end - offset
			if committedBytes < 0 {
				return fmt.Errorf("upload failed: committed negative bytes somehow (committed range: %s, expectedOffset: %d)", committedRange, expectedOffset)
			}

			if committedBytes > 0 {
				// part of this chunk made it; retry from the committed point
				ru.Debugf("commit partially succeeded (committed %d / %d byte, %d blocks)", committedBytes, buflen, committedBytes/gcsChunkSize)
				return &retryError{committedBytes}
			} else {
				ru.Debugf("commit failed (retrying %d blocks)", buflen/gcsChunkSize)
				return &retryError{committedBytes}
			}
		}
	}

	return fmt.Errorf("got HTTP %d (%s)", res.StatusCode, status)
}
Example #3
0
// ExtractZip extracts a zip archive read from readerAt (of the given size)
// into dir, reporting progress through settings.Consumer. If
// settings.ResumeFrom is set, the index of the last fully-extracted file is
// persisted there so an interrupted extraction can be resumed.
//
// Returns counts of the directories, regular files and symlinks extracted.
func ExtractZip(readerAt io.ReaderAt, size int64, dir string, settings ExtractSettings) (*ExtractResult, error) {
	dirCount := 0
	regCount := 0
	symlinkCount := 0

	reader, err := zip.NewReader(readerAt, size)
	if err != nil {
		return nil, errors.Wrap(err, 1)
	}

	// total uncompressed size, used as the denominator for progress
	var totalSize int64
	for _, file := range reader.File {
		totalSize += int64(file.UncompressedSize64)
	}

	var doneSize uint64
	var lastDoneIndex int = -1

	// try to load the resume marker (index of the last fully-extracted file)
	func() {
		if settings.ResumeFrom == "" {
			return
		}

		resBytes, resErr := ioutil.ReadFile(settings.ResumeFrom)
		if resErr != nil {
			// a missing resume file just means "start from scratch"
			if !errors.Is(resErr, os.ErrNotExist) {
				settings.Consumer.Warnf("Couldn't read resume file: %s", resErr.Error())
			}
			return
		}

		lastDone64, resErr := strconv.ParseInt(string(resBytes), 10, 64)
		if resErr != nil {
			settings.Consumer.Warnf("Couldn't parse resume file: %s", resErr.Error())
			return
		}

		lastDoneIndex = int(lastDone64)
		settings.Consumer.Infof("Resuming from file %d", lastDoneIndex)
	}()

	warnedAboutWrite := false

	// persist the index of the last fully-extracted file; warn only once
	// if the resume file can't be written
	writeProgress := func(fileIndex int) {
		if settings.ResumeFrom == "" {
			return
		}

		payload := fmt.Sprintf("%d", fileIndex)

		wErr := ioutil.WriteFile(settings.ResumeFrom, []byte(payload), 0644)
		if wErr != nil {
			if !warnedAboutWrite {
				warnedAboutWrite = true
				settings.Consumer.Warnf("Couldn't save resume file: %s", wErr.Error())
			}
			return
		}
	}

	// whether we finish or fail, drop the resume file: it's only meaningful
	// while this extraction is in flight
	defer func() {
		if settings.ResumeFrom == "" {
			return
		}

		rErr := os.Remove(settings.ResumeFrom)
		if rErr != nil {
			settings.Consumer.Warnf("Couldn't remove resume file: %s", rErr.Error())
		}
	}()

	if settings.OnUncompressedSizeKnown != nil {
		settings.OnUncompressedSizeKnown(totalSize)
	}

	// symlink entries are not extracted as symlinks on windows
	windows := runtime.GOOS == "windows"

	for fileIndex, file := range reader.File {
		if fileIndex <= lastDoneIndex {
			// already extracted in a previous run; count it as done
			settings.Consumer.Debugf("Skipping file %d", fileIndex)
			doneSize += file.UncompressedSize64
			settings.Consumer.Progress(float64(doneSize) / float64(totalSize))
			continue
		}

		// wrapped in a closure so the deferred Close calls fire at the end
		// of each iteration instead of piling up until function return
		err = func() error {
			rel := file.Name
			// NOTE(review): rel comes straight from the archive; a hostile
			// zip could contain "../" components (zip-slip) — consider
			// sanitizing before joining
			filename := filepath.Join(dir, filepath.FromSlash(rel))

			info := file.FileInfo()
			mode := info.Mode()

			if info.IsDir() {
				err = Mkdir(filename)
				if err != nil {
					return errors.Wrap(err, 1)
				}
				dirCount++
			} else if mode&os.ModeSymlink > 0 && !windows {
				fileReader, fErr := file.Open()
				if fErr != nil {
					return errors.Wrap(fErr, 1)
				}
				defer fileReader.Close()

				// the entry's content is the link target
				linkname, lErr := ioutil.ReadAll(fileReader)
				if lErr != nil {
					return errors.Wrap(lErr, 1)
				}

				lErr = Symlink(string(linkname), filename, settings.Consumer)
				if lErr != nil {
					return errors.Wrap(lErr, 1)
				}
				symlinkCount++
			} else {
				regCount++

				fileReader, fErr := file.Open()
				if fErr != nil {
					return errors.Wrap(fErr, 1)
				}
				defer fileReader.Close()

				settings.Consumer.Debugf("extract %s", filename)
				// fine-grained progress while this file streams out
				countingReader := counter.NewReaderCallback(func(offset int64) {
					currentSize := int64(doneSize) + offset
					settings.Consumer.Progress(float64(currentSize) / float64(totalSize))
				}, fileReader)

				err = CopyFile(filename, os.FileMode(mode&LuckyMode|ModeMask), countingReader)
				if err != nil {
					return errors.Wrap(err, 1)
				}
			}

			return nil
		}()
		if err != nil {
			return nil, errors.Wrap(err, 1)
		}

		doneSize += file.UncompressedSize64
		settings.Consumer.Progress(float64(doneSize) / float64(totalSize))
		writeProgress(fileIndex)
	}

	return &ExtractResult{
		Dirs:     dirCount,
		Files:    regCount,
		Symlinks: symlinkCount,
	}, nil
}
Example #4
0
// WritePatch outputs a pwr patch to patchWriter
func (dctx *DiffContext) WritePatch(patchWriter io.Writer, signatureWriter io.Writer) error {
	if dctx.Compression == nil {
		return errors.Wrap(fmt.Errorf("No compression settings specified, bailing out"), 1)
	}

	// signature header
	rawSigWire := wire.NewWriteContext(signatureWriter)
	err := rawSigWire.WriteMagic(SignatureMagic)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = rawSigWire.WriteMessage(&SignatureHeader{
		Compression: dctx.Compression,
	})
	if err != nil {
		return errors.Wrap(err, 1)
	}

	sigWire, err := CompressWire(rawSigWire, dctx.Compression)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = sigWire.WriteMessage(dctx.SourceContainer)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	// patch header
	rawPatchWire := wire.NewWriteContext(patchWriter)
	err = rawPatchWire.WriteMagic(PatchMagic)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	header := &PatchHeader{
		Compression: dctx.Compression,
	}

	err = rawPatchWire.WriteMessage(header)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	patchWire, err := CompressWire(rawPatchWire, dctx.Compression)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = patchWire.WriteMessage(dctx.TargetContainer)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = patchWire.WriteMessage(dctx.SourceContainer)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	sourceBytes := dctx.SourceContainer.Size
	fileOffset := int64(0)

	onSourceRead := func(count int64) {
		dctx.Consumer.Progress(float64(fileOffset+count) / float64(sourceBytes))
	}

	sigWriter := makeSigWriter(sigWire)
	opsWriter := makeOpsWriter(patchWire, dctx)

	diffContext := mksync()
	signContext := mksync()
	blockLibrary := wsync.NewBlockLibrary(dctx.TargetSignature)

	targetContainerPathToIndex := make(map[string]int64)
	for index, f := range dctx.TargetContainer.Files {
		targetContainerPathToIndex[f.Path] = int64(index)
	}

	// re-used messages
	syncHeader := &SyncHeader{}
	syncDelimiter := &SyncOp{
		Type: SyncOp_HEY_YOU_DID_IT,
	}

	pool := dctx.Pool
	defer func() {
		if fErr := pool.Close(); fErr != nil && err == nil {
			err = errors.Wrap(fErr, 1)
		}
	}()

	for fileIndex, f := range dctx.SourceContainer.Files {
		dctx.Consumer.ProgressLabel(f.Path)
		dctx.Consumer.Debug(fmt.Sprintf("%s (%s)", f.Path, humanize.IBytes(uint64(f.Size))))
		fileOffset = f.Offset

		syncHeader.Reset()
		syncHeader.FileIndex = int64(fileIndex)
		err = patchWire.WriteMessage(syncHeader)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		var sourceReader io.Reader
		sourceReader, err = pool.GetReader(int64(fileIndex))
		if err != nil {
			return errors.Wrap(err, 1)
		}

		//             / differ
		// source file +
		//             \ signer
		diffReader, diffWriter := io.Pipe()
		signReader, signWriter := io.Pipe()

		done := make(chan bool)
		errs := make(chan error)

		var preferredFileIndex int64 = -1
		if oldIndex, ok := targetContainerPathToIndex[f.Path]; ok {
			preferredFileIndex = oldIndex
		}

		go diffFile(diffContext, dctx, blockLibrary, diffReader, opsWriter, preferredFileIndex, errs, done)
		go signFile(signContext, fileIndex, signReader, sigWriter, errs, done)

		go func() {
			defer func() {
				if dErr := diffWriter.Close(); dErr != nil {
					errs <- errors.Wrap(dErr, 1)
				}
			}()
			defer func() {
				if sErr := signWriter.Close(); sErr != nil {
					errs <- errors.Wrap(sErr, 1)
				}
			}()

			mw := io.MultiWriter(diffWriter, signWriter)

			sourceReadCounter := counter.NewReaderCallback(onSourceRead, sourceReader)
			_, cErr := io.Copy(mw, sourceReadCounter)
			if cErr != nil {
				errs <- errors.Wrap(cErr, 1)
			}
		}()

		// wait until all are done
		// or an error occurs
		for c := 0; c < 2; c++ {
			select {
			case wErr := <-errs:
				return errors.Wrap(wErr, 1)
			case <-done:
			}
		}

		err = patchWire.WriteMessage(syncDelimiter)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	err = patchWire.Close()
	if err != nil {
		return errors.Wrap(err, 1)
	}
	err = sigWire.Close()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	return nil
}