Example #1
// checkHashes verifies a downloaded file against the hashes advertised
// in its x-goog-hash response headers (as sent by Google Cloud Storage).
func checkHashes(header http.Header, file string) (bool, error) {
	googHashes := header[http.CanonicalHeaderKey("x-goog-hash")]

	for _, googHash := range googHashes {
		tokens := strings.SplitN(googHash, "=", 2)
		if len(tokens) != 2 {
			comm.Logf("Could not parse hash header %q, skipping", googHash)
			continue
		}
		hashType := tokens[0]
		hashValue, err := base64.StdEncoding.DecodeString(tokens[1])
		if err != nil {
			comm.Logf("Could not verify %s hash: %s", hashType, err)
			continue
		}

		start := time.Now()
		checked, err := checkHash(hashType, hashValue, file)
		if err != nil {
			return false, errors.Wrap(err, 1)
		}

		if checked {
			comm.Debugf("%10s pass (took %s)", hashType, time.Since(start))
		} else {
			comm.Debugf("%10s skip (use --thorough to force check)", hashType)
		}
	}

	return true, nil
}
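checkHash isn't among these examples. A minimal sketch of what it could look like, assuming MD5 is the only hash computed eagerly and anything else is skipped unless forced (names and behavior here are guesses, not butler's actual implementation):

// checkHash is a hypothetical helper: it verifies hashValue against the
// file's contents for hash types it knows, and reports (false, nil) to
// signal "skipped" for types it doesn't.
func checkHash(hashType string, hashValue []byte, file string) (bool, error) {
	switch hashType {
	case "md5":
		f, err := os.Open(file)
		if err != nil {
			return false, err
		}
		defer f.Close()

		h := md5.New()
		if _, err := io.Copy(h, f); err != nil {
			return false, err
		}
		if !bytes.Equal(h.Sum(nil), hashValue) {
			return false, fmt.Errorf("md5 mismatch for %s", file)
		}
		return true, nil
	default:
		// crc32c et al. would need their own implementations
		return false, nil
	}
}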
Example #2
// dittoSymlink recreates the symlink at srcpath as dstpath, replacing
// whatever was at dstpath before.
func dittoSymlink(srcpath string, dstpath string, f os.FileInfo) {
	must(os.RemoveAll(dstpath))

	linkname, err := os.Readlink(srcpath)
	must(err)

	comm.Debugf("ln -s %s %s", linkname, dstpath)
	must(os.Symlink(linkname, dstpath))
}
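The ditto helpers all go through must, which isn't shown either. A plausible minimal version, given that comm.Dief aborts the command with a fatal message (as used in doPush below):

// must aborts the current command on the first non-nil error.
func must(err error) {
	if err != nil {
		comm.Dief("%s", err.Error())
	}
}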
Example #3
// dittoReg copies the regular file at srcpath to dstpath with the given
// mode, replacing whatever was at dstpath before.
func dittoReg(srcpath string, dstpath string, mode os.FileMode) {
	comm.Debugf("cp -f %s %s", srcpath, dstpath)
	must(os.RemoveAll(dstpath))

	writer, err := os.OpenFile(dstpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
	must(err)
	defer writer.Close()

	reader, err := os.Open(srcpath)
	must(err)
	defer reader.Close()

	_, err = io.Copy(writer, reader)
	must(err)

	must(os.Chmod(dstpath, mode))
}
Example #4
// checkIntegrity makes sure a downloaded file matches the size the
// server announced, then delegates to checkHashes.
func checkIntegrity(resp *http.Response, totalBytes int64, file string) (bool, error) {
	diskSize := int64(0)
	stats, err := os.Lstat(file)
	if err == nil {
		diskSize = stats.Size()
	}

	if resp.ContentLength > 0 {
		if totalBytes != diskSize {
			return false, fmt.Errorf("Corrupt download: expected %d bytes, got %d", totalBytes, diskSize)
		}

		comm.Debugf("%10s pass (%d bytes)", "size", totalBytes)
	}

	return checkHashes(resp.Header, file)
}
Example #5
// doPush walks the build at buildPath, diffs it against the channel's
// latest build, and uploads the resulting patch and signature files.
func doPush(buildPath string, specStr string, userVersion string, fixPerms bool) error {
	// start walking source container while waiting on auth flow
	sourceContainerChan := make(chan walkResult)
	walkErrs := make(chan error)
	go doWalk(buildPath, sourceContainerChan, walkErrs, fixPerms)

	spec, err := itchio.ParseSpec(specStr)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = spec.EnsureChannel()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	client, err := authenticateViaOauth()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	newBuildRes, err := client.CreateBuild(spec.Target, spec.Channel, userVersion)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	buildID := newBuildRes.Build.ID
	parentID := newBuildRes.Build.ParentBuild.ID

	var targetSignature *pwr.SignatureInfo

	if parentID == 0 {
		comm.Opf("For channel `%s`: pushing first build", spec.Channel)
		targetSignature = &pwr.SignatureInfo{
			Container: &tlc.Container{},
			Hashes:    make([]wsync.BlockHash, 0),
		}
	} else {
		comm.Opf("For channel `%s`: last build is %d, downloading its signature", spec.Channel, parentID)
		var buildFiles itchio.ListBuildFilesResponse
		buildFiles, err = client.ListBuildFiles(parentID)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		signatureFile := itchio.FindBuildFile(itchio.BuildFileType_SIGNATURE, buildFiles.Files)
		if signatureFile == nil {
			comm.Dief("Could not find signature for parent build %d, aborting", parentID)
		}

		var signatureReader io.Reader
		signatureReader, err = client.DownloadBuildFile(parentID, signatureFile.ID)
		if err != nil {
			return errors.Wrap(err, 1)
		}

		targetSignature, err = pwr.ReadSignature(signatureReader)
		if err != nil {
			return errors.Wrap(err, 1)
		}
	}

	newPatchRes, newSignatureRes, err := createBothFiles(client, buildID)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	uploadDone := make(chan bool)
	uploadErrs := make(chan error)

	patchWriter, err := uploader.NewResumableUpload(newPatchRes.File.UploadURL,
		uploadDone, uploadErrs, uploader.ResumableUploadSettings{
			Consumer: comm.NewStateConsumer(),
		})
	if err != nil {
		return errors.Wrap(err, 1)
	}
	patchWriter.MaxChunkGroup = *appArgs.maxChunkGroup

	signatureWriter, err := uploader.NewResumableUpload(newSignatureRes.File.UploadURL,
		uploadDone, uploadErrs, uploader.ResumableUploadSettings{
			Consumer: comm.NewStateConsumer(),
		})
	if err != nil {
		return errors.Wrap(err, 1)
	}
	signatureWriter.MaxChunkGroup = *appArgs.maxChunkGroup

	comm.Debugf("Launching patch & signature channels")

	patchCounter := counter.NewWriter(patchWriter)
	signatureCounter := counter.NewWriter(signatureWriter)

	// we started walking the source container in the beginning,
	// we actually need it now.
	// note that we could actually start diffing before all the file
	// creation & upload setup is done

	var sourceContainer *tlc.Container
	var sourcePool wsync.Pool

	comm.Debugf("Waiting for source container")
	select {
	case walkErr := <-walkErrs:
		return errors.Wrap(walkErr, 1)
	case walkies := <-sourceContainerChan:
		comm.Debugf("Got sourceContainer!")
		sourceContainer = walkies.container
		sourcePool = walkies.pool
	}

	comm.Opf("Pushing %s (%s)", humanize.IBytes(uint64(sourceContainer.Size)), sourceContainer.Stats())

	comm.Debugf("Building diff context")
	var readBytes int64

	bytesPerSec := float64(0)
	lastUploadedBytes := int64(0)
	stopTicking := make(chan struct{})

	updateProgress := func() {
		uploadedBytes := patchWriter.UploadedBytes

		// input bytes that aren't in output, for example:
		//  - bytes that have been compressed away
		//  - bytes that were in old build and were simply reused
		goneBytes := readBytes - patchWriter.TotalBytes

		conservativeTotalBytes := sourceContainer.Size - goneBytes

		leftBytes := conservativeTotalBytes - uploadedBytes
		if leftBytes > AlmostThereThreshold {
			netStatus := "- network idle"
			if bytesPerSec > 1 {
				netStatus = fmt.Sprintf("@ %s/s", humanize.IBytes(uint64(bytesPerSec)))
			}
			comm.ProgressLabel(fmt.Sprintf("%s, %s left", netStatus, humanize.IBytes(uint64(leftBytes))))
		} else {
			comm.ProgressLabel("- almost there")
		}

		conservativeProgress := float64(uploadedBytes) / float64(conservativeTotalBytes)
		conservativeProgress = min(1.0, conservativeProgress)
		comm.Progress(conservativeProgress)

		comm.ProgressScale(float64(readBytes) / float64(sourceContainer.Size))
	}

	go func() {
		ticker := time.NewTicker(2 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				bytesPerSec = float64(patchWriter.UploadedBytes-lastUploadedBytes) / 2.0
				lastUploadedBytes = patchWriter.UploadedBytes
				updateProgress()
			case <-stopTicking:
				// a plain break would only leave the select, not the loop
				return
			}
		}
	}()

	patchWriter.OnProgress = updateProgress

	stateConsumer := &state.Consumer{
		OnProgress: func(progress float64) {
			readBytes = int64(float64(sourceContainer.Size) * progress)
			updateProgress()
		},
	}

	dctx := &pwr.DiffContext{
		Compression: &pwr.CompressionSettings{
			Algorithm: pwr.CompressionAlgorithm_BROTLI,
			Quality:   1,
		},

		SourceContainer: sourceContainer,
		Pool:            sourcePool,

		TargetContainer: targetSignature.Container,
		TargetSignature: targetSignature.Hashes,

		Consumer: stateConsumer,
	}

	comm.StartProgress()
	comm.ProgressScale(0.0)
	err = dctx.WritePatch(patchCounter, signatureCounter)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	// close in a goroutine to avoid deadlocking
	doClose := func(c io.Closer, done chan bool, errs chan error) {
		closeErr := c.Close()
		if closeErr != nil {
			errs <- errors.Wrap(closeErr, 1)
			return
		}

		done <- true
	}

	go doClose(patchWriter, uploadDone, uploadErrs)
	go doClose(signatureWriter, uploadDone, uploadErrs)

	for c := 0; c < 4; c++ {
		select {
		case uploadErr := <-uploadErrs:
			return errors.Wrap(uploadErr, 1)
		case <-uploadDone:
			comm.Debugf("upload done")
		}
	}

	close(stopTicking)
	comm.ProgressLabel("finalizing build")

	finalDone := make(chan bool)
	finalErrs := make(chan error)

	doFinalize := func(fileID int64, fileSize int64, done chan bool, errs chan error) {
		// keep the error local: both finalize goroutines run at once,
		// so writing to the enclosing err would be a data race
		_, fErr := client.FinalizeBuildFile(buildID, fileID, fileSize)
		if fErr != nil {
			errs <- errors.Wrap(fErr, 1)
			return
		}

		done <- true
	}

	go doFinalize(newPatchRes.File.ID, patchCounter.Count(), finalDone, finalErrs)
	go doFinalize(newSignatureRes.File.ID, signatureCounter.Count(), finalDone, finalErrs)

	for i := 0; i < 2; i++ {
		select {
		case err := <-finalErrs:
			return errors.Wrap(err, 1)
		case <-finalDone:
		}
	}

	comm.EndProgress()

	{
		prettyPatchSize := humanize.IBytes(uint64(patchCounter.Count()))
		percReused := 100.0 * float64(dctx.ReusedBytes) / float64(dctx.FreshBytes+dctx.ReusedBytes)
		relToNew := 100.0 * float64(patchCounter.Count()) / float64(sourceContainer.Size)
		prettyFreshSize := humanize.IBytes(uint64(dctx.FreshBytes))
		savings := 100.0 - relToNew

		if dctx.ReusedBytes > 0 {
			comm.Statf("Re-used %.2f%% of old, added %s fresh data", percReused, prettyFreshSize)
		} else {
			comm.Statf("Added %s fresh data", prettyFreshSize)
		}

		if savings > 0 && !math.IsNaN(savings) {
			comm.Statf("%s patch (%.2f%% savings)", prettyPatchSize, 100.0-relToNew)
		} else {
			comm.Statf("%s patch (no savings)", prettyPatchSize)
		}
	}
	comm.Opf("Build is now processing, should be up in a bit (see `butler status`)")
	comm.Logf("")

	return nil
}
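walkResult and doWalk aren't among these examples. Judging from the fields doPush reads off the channel, the type presumably looks something like this:

// walkResult (inferred from usage above): what doWalk sends once the
// source directory has been fully scanned.
type walkResult struct {
	container *tlc.Container // file listing plus total size
	pool      wsync.Pool     // read access to the walked files
}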
Example #6
// createBothFiles creates the patch and signature build files on the
// server concurrently, and opens a resumable upload session for each.
func createBothFiles(client *itchio.Client, buildID int64) (patch itchio.NewBuildFileResponse, signature itchio.NewBuildFileResponse, err error) {
	createFile := func(buildType itchio.BuildFileType, done chan fileSlot, errs chan error) {
		// keep errors local: assigning to the enclosing named return
		// from two goroutines would be a data race
		res, cErr := client.CreateBuildFile(buildID, buildType, itchio.BuildFileSubType_DEFAULT, itchio.UploadType_DEFERRED_RESUMABLE)
		if cErr != nil {
			errs <- errors.Wrap(cErr, 1)
			return
		}
		comm.Debugf("Created %s build file: %+v", buildType, res.File)

		// TODO: resumable upload session creation sounds like it belongs in an external lib, go-itchio maybe?
		req, reqErr := http.NewRequest("POST", res.File.UploadURL, nil)
		if reqErr != nil {
			errs <- errors.Wrap(reqErr, 1)
			return
		}

		req.ContentLength = 0

		for k, v := range res.File.UploadHeaders {
			req.Header.Add(k, v)
		}

		gcsRes, gcsErr := client.HTTPClient.Do(req)
		if gcsErr != nil {
			errs <- errors.Wrap(gcsErr, 1)
			return
		}
		defer gcsRes.Body.Close()

		if gcsRes.StatusCode != 201 {
			errs <- errors.Wrap(fmt.Errorf("could not create resumable upload session (got HTTP %d)", gcsRes.StatusCode), 1)
			return
		}

		comm.Debugf("Started resumable upload session %s", gcsRes.Header.Get("Location"))

		res.File.UploadHeaders = nil
		res.File.UploadURL = gcsRes.Header.Get("Location")

		done <- fileSlot{buildType, res}
	}

	done := make(chan fileSlot)
	errs := make(chan error)

	go createFile(itchio.BuildFileType_PATCH, done, errs)
	go createFile(itchio.BuildFileType_SIGNATURE, done, errs)

	for i := 0; i < 2; i++ {
		select {
		case err = <-errs:
			err = errors.Wrap(err, 1)
			return
		case slot := <-done:
			switch slot.Type {
			case itchio.BuildFileType_PATCH:
				patch = slot.Response
			case itchio.BuildFileType_SIGNATURE:
				signature = slot.Response
			}
		}
	}

	return
}
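fileSlot is declared elsewhere in the file; given how it's built and consumed above, it's presumably just a tagged pair:

// fileSlot pairs a build file type with the server's response, letting
// the receiving loop tell the patch and signature results apart.
type fileSlot struct {
	Type     itchio.BuildFileType
	Response itchio.NewBuildFileResponse
}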
Example #7
// doFetch downloads the latest build archive of a channel and extracts
// it into outPath, which must be empty or not yet exist. The named
// return lets the deferred cleanup below propagate its error.
func doFetch(specStr string, outPath string) (err error) {
	err = os.MkdirAll(outPath, os.FileMode(0755))
	if err != nil {
		return errors.Wrap(err, 1)
	}

	outFiles, err := ioutil.ReadDir(outPath)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	if len(outFiles) > 0 {
		return fmt.Errorf("Destination directory %s exists and is not empty", outPath)
	}

	spec, err := itchio.ParseSpec(specStr)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	err = spec.EnsureChannel()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	client, err := authenticateViaOauth()
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.Opf("Getting last build of channel %s", spec.Channel)

	channelResponse, err := client.GetChannel(spec.Target, spec.Channel)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	if channelResponse.Channel.Head == nil {
		return fmt.Errorf("Channel %s doesn't have any builds yet", spec.Channel)
	}

	head := *channelResponse.Channel.Head
	var headArchive *itchio.BuildFileInfo

	for _, file := range head.Files {
		comm.Debugf("found file %v", file)
		if file.Type == itchio.BuildFileType_ARCHIVE && file.SubType == itchio.BuildFileSubType_DEFAULT && file.State == itchio.BuildFileState_UPLOADED {
			headArchive = file
			break
		}
	}

	if headArchive == nil {
		return fmt.Errorf("Channel %s's latest build is still processing", spec.Channel)
	}

	dlReader, err := client.DownloadBuildFile(head.ID, headArchive.ID)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	tmpFile, err := ioutil.TempFile("", "butler-fetch")
	if err != nil {
		return errors.Wrap(err, 1)
	}

	defer func() {
		tmpFile.Close()
		if cErr := os.Remove(tmpFile.Name()); err == nil && cErr != nil {
			err = cErr
		}
	}()

	comm.Opf("Downloading build %d", head.ID)

	archiveSize, err := io.Copy(tmpFile, dlReader)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	_, err = tmpFile.Seek(0, io.SeekStart)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	settings := archiver.ExtractSettings{
		Consumer: comm.NewStateConsumer(),
	}

	comm.Opf("Extracting into %s", outPath)
	result, err := archiver.Extract(tmpFile, archiveSize, outPath, settings)
	if err != nil {
		return errors.Wrap(err, 1)
	}

	comm.Statf("Extracted %d dirs, %d files, %d links into %s", result.Dirs, result.Files, result.Symlinks, outPath)

	if err != nil {
		return errors.Wrap(err, 1)
	}
	return nil
}
Example #8
// doProbe reads a patch's headers and scans its sync operations to
// report which files account for most of the fresh (non-reused) data.
func doProbe(patch string) error {
	patchReader, err := eos.Open(patch)
	if err != nil {
		return err
	}

	defer patchReader.Close()

	stats, err := patchReader.Stat()
	if err != nil {
		return err
	}

	comm.Statf("patch:  %s", humanize.IBytes(uint64(stats.Size())))

	rctx := wire.NewReadContext(patchReader)
	err = rctx.ExpectMagic(pwr.PatchMagic)
	if err != nil {
		return err
	}

	header := &pwr.PatchHeader{}
	err = rctx.ReadMessage(header)
	if err != nil {
		return err
	}

	rctx, err = pwr.DecompressWire(rctx, header.Compression)
	if err != nil {
		return err
	}

	target := &tlc.Container{}
	err = rctx.ReadMessage(target)
	if err != nil {
		return err
	}

	source := &tlc.Container{}
	err = rctx.ReadMessage(source)
	if err != nil {
		return err
	}

	comm.Statf("target: %s in %s", humanize.IBytes(uint64(target.Size)), target.Stats())
	comm.Statf("source: %s in %s", humanize.IBytes(uint64(target.Size)), source.Stats())

	var patchStats []patchStat

	sh := &pwr.SyncHeader{}
	rop := &pwr.SyncOp{}

	for fileIndex, f := range source.Files {
		stat := patchStat{
			fileIndex: int64(fileIndex),
			freshData: f.Size,
		}

		sh.Reset()
		err = rctx.ReadMessage(sh)
		if err != nil {
			return err
		}

		if sh.FileIndex != int64(fileIndex) {
			return fmt.Errorf("malformed patch: expected file %d, got %d", fileIndex, sh.FileIndex)
		}

		readingOps := true

		var pos int64

		for readingOps {
			rop.Reset()

			err = rctx.ReadMessage(rop)
			if err != nil {
				return err
			}

			switch rop.Type {
			case pwr.SyncOp_BLOCK_RANGE:
				fixedSize := (rop.BlockSpan - 1) * pwr.BlockSize
				lastIndex := rop.BlockIndex + (rop.BlockSpan - 1)
				lastSize := pwr.ComputeBlockSize(f.Size, lastIndex)
				totalSize := (fixedSize + lastSize)
				stat.freshData -= totalSize
				pos += totalSize
			case pwr.SyncOp_DATA:
				totalSize := int64(len(rop.Data))
				if *appArgs.verbose {
					comm.Debugf("%s fresh data at %s (%d-%d)", humanize.IBytes(uint64(totalSize)), humanize.IBytes(uint64(pos)),
						pos, pos+totalSize)
				}
				pos += totalSize
			case pwr.SyncOp_HEY_YOU_DID_IT:
				readingOps = false
			}
		}

		patchStats = append(patchStats, stat)
	}

	sort.Sort(byDecreasingFreshData(patchStats))

	var totalFresh int64
	for _, stat := range patchStats {
		totalFresh += stat.freshData
	}

	var eightyFresh = int64(0.8 * float64(totalFresh))
	var printedFresh int64

	comm.Opf("80%% of fresh data is in the following files:")

	for _, stat := range patchStats {
		f := source.Files[stat.fileIndex]
		comm.Logf("%s in %s (%.2f%% changed)",
			humanize.IBytes(uint64(stat.freshData)),
			f.Path,
			float64(stat.freshData)/float64(f.Size)*100.0)

		printedFresh += stat.freshData
		if printedFresh >= eightyFresh {
			break
		}
	}

	return nil
}
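patchStat and byDecreasingFreshData are declared elsewhere; based on the fields used above, they likely amount to:

// patchStat records how much fresh data one file contributes to a patch.
type patchStat struct {
	fileIndex int64
	freshData int64
}

// byDecreasingFreshData sorts files with the most fresh data first.
type byDecreasingFreshData []patchStat

func (s byDecreasingFreshData) Len() int           { return len(s) }
func (s byDecreasingFreshData) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byDecreasingFreshData) Less(i, j int) bool { return s[i].freshData > s[j].freshData }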
Example #9
// tryDl downloads url into dest, resuming from whatever is already on
// disk when the server supports byte ranges, and returns the total size.
func tryDl(url string, dest string) (int64, error) {
	existingBytes := int64(0)
	stats, err := os.Lstat(dest)
	if err == nil {
		existingBytes = stats.Size()
	}

	client := timeout.NewDefaultClient()

	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("User-Agent", userAgent())
	byteRange := fmt.Sprintf("bytes=%d-", existingBytes)

	req.Header.Set("Range", byteRange)
	resp, err := client.Do(req)
	if err != nil {
		return 0, errors.Wrap(err, 1)
	}
	defer resp.Body.Close()

	doDownload := true
	totalBytes := existingBytes + resp.ContentLength

	hostInfo := fmt.Sprintf("%s at %s", resp.Header.Get("Server"), req.Host)

	switch resp.StatusCode {
	case 200: // OK
		comm.Debugf("HTTP 200 OK (no byte range support)")
		totalBytes = resp.ContentLength

		if existingBytes == resp.ContentLength {
			// already have the exact same number of bytes, hopefully the same ones
			doDownload = false
		} else {
			// will send data, but doesn't support byte ranges
			existingBytes = 0
			os.Truncate(dest, 0)
		}
	case 206: // Partial Content
		comm.Debugf("HTTP 206 Partial Content")
		// will send incremental data
	case 416: // Requested Range not Satisfiable
		comm.Debugf("HTTP 416 Requested Range not Satisfiable")
		// already has everything
		doDownload = false

		req, _ := http.NewRequest("HEAD", url, nil)
		req.Header.Set("User-Agent", userAgent())
		resp, err = client.Do(req)
		if err != nil {
			return 0, errors.Wrap(err, 1)
		}

		if existingBytes > resp.ContentLength {
			comm.Debugf("Existing file too big (%d), truncating to %d", existingBytes, resp.ContentLength)
			existingBytes = resp.ContentLength
			os.Truncate(dest, existingBytes)
		}
		totalBytes = existingBytes
	default:
		return 0, fmt.Errorf("%s responded with HTTP %s", hostInfo, resp.Status)
	}

	if doDownload {
		if existingBytes > 0 {
			comm.Logf("Resuming (%s + %s = %s) download from %s", humanize.IBytes(uint64(existingBytes)), humanize.IBytes(uint64(resp.ContentLength)), humanize.IBytes(uint64(totalBytes)), hostInfo)
		} else {
			comm.Logf("Downloading %s from %s", humanize.IBytes(uint64(resp.ContentLength)), hostInfo)
		}
		err = appendAllToFile(resp.Body, dest, existingBytes, totalBytes)
		if err != nil {
			return 0, errors.Wrap(err, 1)
		}
	} else {
		comm.Log("Already fully downloaded")
	}

	_, err = checkIntegrity(resp, totalBytes, dest)
	if err != nil {
		comm.Log("Integrity checks failed, truncating")
		os.Truncate(dest, 0)
		return 0, errors.Wrap(err, 1)
	}

	return totalBytes, nil
}
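appendAllToFile isn't shown here. A minimal sketch under the assumption that it appends the response body to dest while feeding comm's progress bar (the buffering and progress wiring are guesses):

// appendAllToFile (hypothetical sketch): append everything from reader
// to dest, reporting progress against totalBytes as bytes arrive.
func appendAllToFile(reader io.Reader, dest string, existingBytes int64, totalBytes int64) error {
	f, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	comm.StartProgress()
	defer comm.EndProgress()

	buf := make([]byte, 256*1024)
	written := existingBytes
	for {
		n, readErr := reader.Read(buf)
		if n > 0 {
			if _, writeErr := f.Write(buf[:n]); writeErr != nil {
				return writeErr
			}
			written += int64(n)
			comm.Progress(float64(written) / float64(totalBytes))
		}
		if readErr == io.EOF {
			return nil
		}
		if readErr != nil {
			return readErr
		}
	}
}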
Example #10
// ditto mirrors src into dst, much like `rsync -a`. It does not preserve
// ownership or permissions, except for the executable bit.
func ditto(src string, dst string) {
	comm.Debugf("rsync -a %s %s", src, dst)

	totalSize := int64(0)
	doneSize := int64(0)
	oldProgress := 0.0

	inc := func(_ string, f os.FileInfo, err error) error {
		if err != nil {
			return nil
		}
		totalSize += f.Size()
		return nil
	}

	onFile := func(path string, f os.FileInfo, err error) error {
		if err != nil {
			comm.Logf("ignoring error %s", err.Error())
			return nil
		}

		rel, err := filepath.Rel(src, path)
		must(err)

		dstpath := filepath.Join(dst, rel)
		mode := f.Mode()

		switch {
		case mode.IsDir():
			dittoMkdir(dstpath)

		case mode.IsRegular():
			dittoReg(path, dstpath, os.FileMode(f.Mode()&archiver.LuckyMode|archiver.ModeMask))

		case (mode&os.ModeSymlink > 0):
			dittoSymlink(path, dstpath, f)
		}

		comm.Debug(rel)

		doneSize += f.Size()

		progress := float64(doneSize) / float64(totalSize)
		if progress-oldProgress > 0.01 {
			oldProgress = progress
			comm.Progress(progress)
		}

		return nil
	}

	rootinfo, err := os.Lstat(src)
	must(err)

	if rootinfo.IsDir() {
		totalSize = 0
		comm.Logf("Counting files in %s...", src)
		filepath.Walk(src, inc)

		comm.Logf("Mirroring...")
		filepath.Walk(src, onFile)
	} else {
		totalSize = rootinfo.Size()
		onFile(src, rootinfo, nil)
	}

	comm.EndProgress()
}
Example #11
func tryWipe(path string) error {
	comm.Debugf("rm -rf %s", path)
	return os.RemoveAll(path)
}
Example #12
func dittoMkdir(dstpath string) {
	comm.Debugf("mkdir %s", dstpath)
	must(archiver.Mkdir(dstpath))
}
Example #13
func mkdir(dir string) {
	comm.Debugf("mkdir -p %s", dir)

	must(os.MkdirAll(dir, archiver.DirMode))
}