Example No. 1
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}

	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}

	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if !f.Shared {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]",
					f.Title, f))
			}
		}
	})
	if err != nil {
		return err
	}

	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]",
			f.Path, f))
	}

	if bar != nil {
		bar.Finish()
	}
	return nil
}
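A minimal usage sketch for this example (the cache filename, the counter, and the error handling here are hypothetical, not taken from skicka):

// Hypothetical caller: count the problems that CheckMetadata reports.
nProblems := 0
err := gd.CheckMetadata("drive_metadata.cache", func(msg string) {
	nProblems++
	fmt.Fprintln(os.Stderr, msg)
})
if err != nil {
	log.Fatalf("skicka: %v", err)
}
fmt.Printf("%d metadata problems found\n", nProblems)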
Example No. 2
// syncHierarchyUp synchronizes a local directory hierarchy with Google
// Drive. localPath is the file or directory to start with, and driveRoot
// is the Drive directory into which the file/directory will be uploaded.
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool,
	maxSymlinkDepth int) int {
	if encrypt && key == nil {
		key = decryptEncryptionKey()
	}

	fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot,
		encrypt, trustTimes, maxSymlinkDepth)
	if len(fileMappings) == 0 {
		message("No files to be uploaded.")
		return 0
	}

	nBytesToUpload := int64(0)
	for _, info := range fileMappings {
		if !info.LocalFileInfo.IsDir() {
			nBytesToUpload += info.LocalFileInfo.Size()
		}
	}

	// Given the list of files to sync, first find all of the directories and
	// then either get or create a Drive folder for each one.
	directoryMappingMap := make(map[string]localToRemoteFileMapping)
	var directoryNames []string
	for _, localfile := range fileMappings {
		if localfile.LocalFileInfo.IsDir() {
			directoryNames = append(directoryNames, localfile.DrivePath)
			directoryMappingMap[localfile.DrivePath] = localfile
		}
	}

	// Now sort the directories by name, which ensures that the parent of each
	// directory has already been created if we need to create its children.
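	// (Lexicographic order always places a parent before its children,
	// because the parent's path is a strict prefix of each child's path:
	// "a" < "a/b" < "a/b/c".)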
	sort.Strings(directoryNames)

	if len(directoryNames) > 0 {
		// Actually create/update the directories.
		var dirProgressBar *pb.ProgressBar
		if !quiet {
			dirProgressBar = pb.New(len(directoryNames))
			dirProgressBar.Output = os.Stderr
			dirProgressBar.Prefix("Directories: ")
			dirProgressBar.Start()
		}

		// Sync each of the directories, which serves to create any missing ones.
		for _, dirName := range directoryNames {
			file := directoryMappingMap[dirName]
			err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath,
				encrypt, dirProgressBar)
			if err != nil {
				// Errors creating directories are basically unrecoverable,
				// as they'll prevent us from later uploading any files in
				// them.
				printErrorAndExit(err)
			}
		}
		if dirProgressBar != nil {
			dirProgressBar.Finish()
		}
	}

	var fileProgressBar *pb.ProgressBar
	if !quiet {
		fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
		fileProgressBar.Output = os.Stderr
		fileProgressBar.Prefix("Files: ")
		fileProgressBar.Start()
	}

	// Sort the files by size, small to large.
	sort.Sort(localToRemoteBySize(fileMappings))

	// The two indices uploadFrontIndex and uploadBackIndex point to the
	// range of elements in the fileMappings array that haven't yet been
	// uploaded.
	uploadFrontIndex := 0
	uploadBackIndex := len(fileMappings) - 1

	// First, upload any large files that will use the resumable upload
	// protocol, using a single thread; more threads generally don't
	// improve bandwidth utilization here and seem to make rate limit
	// errors from the Drive API more frequent...
	for ; uploadBackIndex >= 0; uploadBackIndex-- {
		if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
			break
		}

		fm := fileMappings[uploadBackIndex]
		if fm.LocalFileInfo.IsDir() {
			continue
		}

		if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
			fileProgressBar); err != nil {
			addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
		}
	}

	// Upload worker threads send a value over this channel when
	// they're done; the code that launches them waits for all of them
	// to do so before returning.
	doneChan := make(chan int, nWorkers)

	// Now that multiple threads are running, we need a mutex to protect
	// access to uploadFrontIndex and uploadBackIndex.
	var uploadIndexMutex sync.Mutex

	// All but one of the upload threads will grab files to upload starting
	// from the beginning of the fileMappings array, thus doing the smallest
	// files first; one thread starts from the back of the array, doing the
	// largest files first.  In this way, the large files help saturate the
	// available upload bandwidth and hide the fixed overhead of creating
	// the smaller files.
	uploadWorker := func(startFromFront bool) {
		for {
			uploadIndexMutex.Lock()
			if uploadFrontIndex > uploadBackIndex {
				// All files have been uploaded.
				debug.Printf("All files uploaded [%d,%d]; exiting",
					uploadFrontIndex, uploadBackIndex)
				uploadIndexMutex.Unlock()
				doneChan <- 1
				break
			}

			// Get the index into fileMappings for the next file this
			// worker should upload.
			var index int
			if startFromFront {
				index = uploadFrontIndex
				uploadFrontIndex++
			} else {
				index = uploadBackIndex
				uploadBackIndex--
			}
			uploadIndexMutex.Unlock()

			fm := fileMappings[index]
			if fm.LocalFileInfo.IsDir() {
				// Directories have already been taken care of.
				continue
			}

			err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt,
				fileProgressBar)
			if err != nil {
				atomic.AddInt32(&nUploadErrors, 1)
				fmt.Fprintf(os.Stderr, "\nskicka: %s: %v\n", fm.LocalPath, err)
			}
		}
	}

	// Launch the workers.
	for i := 0; i < nWorkers; i++ {
		// All workers except the first one start from the front of
		// the array.
		go uploadWorker(i != 0)
	}

	// Wait for all of the workers to finish.
	for i := 0; i < nWorkers; i++ {
		<-doneChan
	}
	if fileProgressBar != nil {
		fileProgressBar.Finish()
	}

	if nUploadErrors > 0 {
		fmt.Fprintf(os.Stderr, "skicka: %d files not uploaded due to errors. "+
			"This may be a transient failure; try uploading again.\n", nUploadErrors)
	}
	return int(nUploadErrors)
}
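The front/back scheme above is a general work-queue pattern. Below is a self-contained sketch of just that pattern, reduced to printing integers (items, nWorkers, and worker are illustrative names, not from skicka); it uses a sync.WaitGroup where the original uses a done channel, which is equivalent here:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// Work items, sorted small to large as in syncHierarchyUp.
	items := []int{1, 2, 4, 8, 16, 32, 64}
	front, back := 0, len(items)-1

	var mu sync.Mutex // protects front and back
	var wg sync.WaitGroup

	worker := func(fromFront bool) {
		defer wg.Done()
		for {
			mu.Lock()
			if front > back {
				// Every item has been claimed; this worker is done.
				mu.Unlock()
				return
			}
			var idx int
			if fromFront {
				idx = front
				front++
			} else {
				idx = back
				back--
			}
			mu.Unlock()
			fmt.Println("processing item of size", items[idx])
		}
	}

	const nWorkers = 4
	for i := 0; i < nWorkers; i++ {
		wg.Add(1)
		// One worker consumes from the back (largest items first);
		// the rest consume from the front (smallest first).
		go worker(i != 0)
	}
	wg.Wait()
}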
Example No. 3
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
	changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error

	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		}
		if err = gd.tryToHandleDriveAPIError(err, try); err != nil {
			errorChan <- err
			return
		}
	}

	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using time.AfterFunc to put up the progress bar if
	// we're not done after a few seconds?  It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}

	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields of the drive.Change structure that we
		// actually need to be filled in, to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken",
			"items/id", "items/fileId", "items/deleted",
			"items/file/id", "items/file/parents", "items/file/title",
			"items/file/fileSize", "items/file/mimeType", "items/file/properties",
			"items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		if startChangeId >= 0 {
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}

		r, err := q.Do()
		if err != nil {
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			try++
			continue
		}

		// Success. Reset the try counter in case we had errors leading up
		// to this.
		try = 0

		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items

			if bar != nil {
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}

		pageToken = r.NextPageToken
		if pageToken == "" {
			break
		}
	}
	// Signal that no more changes are coming.
	close(changeChan)

	if bar != nil {
		bar.Finish()
	}

	gd.debug("Done updating metadata from Drive")
}