// CheckMetadata downloads the metadata about all of the files currently // stored on Drive and compares it with the local cache. func (gd *GDrive) CheckMetadata(filename string, report func(string)) error { idToFile, err := gd.getIdToFile(filename) if err != nil { return err } // This will almost certainly take a while, so put up a progress bar. var bar *pb.ProgressBar if !gd.quiet { bar = pb.New(len(idToFile)) bar.ShowBar = true bar.ShowCounters = false bar.Output = os.Stderr bar.Prefix("Checking metadata cache: ") bar.Start() } err = gd.runQuery("trashed=false", func(f *drive.File) { if file, ok := idToFile[f.Id]; ok { df := newFile(f.Title, f) if !filesEqual(df, file) { report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v", file.Path, file, df)) } if bar != nil { bar.Increment() } delete(idToFile, f.Id) } else { // It'd be preferable to have "sharedWithMe=false" included in // the query string above, but the combination of that with // "trashed=false" seems to lead to no results being returned. if f.Shared == false { report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]", f.Title, f)) } } }) for _, f := range idToFile { report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]", f.Path, f)) } if bar != nil { bar.Finish() } return nil }
func runClusterBackup(args *docopt.Args) error { client, err := getClusterClient() if err != nil { return err } var bar *pb.ProgressBar var progress backup.ProgressBar if term.IsTerminal(os.Stderr.Fd()) { bar = pb.New(0) bar.SetUnits(pb.U_BYTES) bar.ShowBar = false bar.ShowSpeed = true bar.Output = os.Stderr bar.Start() progress = bar } var dest io.Writer = os.Stdout if filename := args.String["--file"]; filename != "" { f, err := os.Create(filename) if err != nil { return err } defer f.Close() dest = f } fmt.Fprintln(os.Stderr, "Creating cluster backup...") if err := backup.Run(client, dest, progress); err != nil { return err } if bar != nil { bar.Finish() } fmt.Fprintln(os.Stderr, "Backup complete.") return nil }
// PostMultipartP posts a multipart message in the MIME internet format with a callback function with a string stating the upload Progress. func (c *Client) PostMultipartP(path string, files map[string][]byte, params Params, out interface{}, callback func(s string)) error { body := &bytes.Buffer{} writer := multipart.NewWriter(body) for name, source := range files { part, err := writer.CreateFormFile(name, "source.tgz") if err != nil { return err } _, err = io.Copy(part, bytes.NewReader(source)) if err != nil { return err } } for name, value := range params { writer.WriteField(name, value) } err := writer.Close() if err != nil { return err } var bodyReader io.Reader bodyReader = body var bar *pb.ProgressBar if callback != nil { bar = pb.New(body.Len()).SetUnits(pb.U_BYTES) bar.NotPrint = true bar.ShowBar = false bar.Callback = callback bar.Start() bodyReader = bar.NewProxyReader(body) } req, err := c.request("POST", path, bodyReader) if err != nil { return err } req.SetBasicAuth("convox", string(c.Password)) req.Header.Set("Content-Type", writer.FormDataContentType()) res, err := c.client().Do(req) if err != nil { return err } defer res.Body.Close() if err := responseError(res); err != nil { return err } data, err := ioutil.ReadAll(res.Body) if err != nil { return err } if out != nil { err = json.Unmarshal(data, out) if err != nil { return err } } if callback != nil { bar.Finish() } return nil }
// getMetadataChanges fetches the Drive change log starting after
// startChangeId and streams batches of changes to changeChan. On an
// unrecoverable API error it sends the error on errorChan and returns.
// On success it closes changeChan to signal that no more changes are coming.
//
// NOTE(review): on the error paths changeChan is NOT closed — presumably the
// consumer also selects on errorChan and stops; confirm against the caller,
// otherwise a receiver ranging over changeChan could block forever.
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
	changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error

	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		} else {
			// tryToHandleDriveAPIError returns nil when the error was
			// retryable (so we loop again) and non-nil when we should
			// give up.
			err = gd.tryToHandleDriveAPIError(err, try)
		}
		if err != nil {
			errorChan <- err
			return
		}
	}

	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using timer.AfterFunc to put up the progress bar if
	// we're not done after a few seconds? It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}

	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields in the drive.Change structure that we
		// actually to be filled in to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken", "items/id",
			"items/fileId", "items/deleted", "items/file/id",
			"items/file/parents", "items/file/title",
			"items/file/fileSize", "items/file/mimeType",
			"items/file/properties", "items/file/modifiedDate",
			"items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		if startChangeId >= 0 {
			// The change log is inclusive, so resume one past the last
			// change we already have.
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}

		r, err := q.Do()
		if err != nil {
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			// Retryable error: bump the try counter and re-issue the same
			// page request (pageToken is unchanged).
			try++
			continue
		}

		// Success.  Reset the try counter in case we had errors leading up
		// to this.
		try = 0
		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items

			if bar != nil {
				// Progress is measured by how far the last change id in
				// this batch is past our starting point.
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}

		pageToken = string(r.NextPageToken)
		if pageToken == "" {
			break
		}
	}

	// Signal that no more changes are coming.
	close(changeChan)

	if bar != nil {
		bar.Finish()
	}
	gd.debug("Done updating metadata from Drive")
}