func customizeBar(bar *pb.ProgressBar) { bar.ShowCounters = true bar.ShowTimeLeft = false bar.ShowSpeed = true bar.SetMaxWidth(80) bar.SetUnits(pb.U_BYTES) }
// CheckMetadata downloads the metadata about all of the files currently // stored on Drive and compares it with the local cache. func (gd *GDrive) CheckMetadata(filename string, report func(string)) error { idToFile, err := gd.getIdToFile(filename) if err != nil { return err } // This will almost certainly take a while, so put up a progress bar. var bar *pb.ProgressBar if !gd.quiet { bar = pb.New(len(idToFile)) bar.ShowBar = true bar.ShowCounters = false bar.Output = os.Stderr bar.Prefix("Checking metadata cache: ") bar.Start() } err = gd.runQuery("trashed=false", func(f *drive.File) { if file, ok := idToFile[f.Id]; ok { df := newFile(f.Title, f) if !filesEqual(df, file) { report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v", file.Path, file, df)) } if bar != nil { bar.Increment() } delete(idToFile, f.Id) } else { // It'd be preferable to have "sharedWithMe=false" included in // the query string above, but the combination of that with // "trashed=false" seems to lead to no results being returned. if f.Shared == false { report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]", f.Title, f)) } } }) for _, f := range idToFile { report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]", f.Path, f)) } if bar != nil { bar.Finish() } return nil }
// processSystemArchives processes archives for given system func (h *Harvester) processSystemArchives(s *system.System, archives []string) error { var bar *pb.ProgressBar nb := len(archives) // extract archives if !s.Options.Quiet { fmt.Printf("[%s] Extracting %v archive(s)\n", s.Infos.Name, nb) if !s.Options.Debug { bar = pb.StartNew(nb) bar.ShowCounters = true bar.ShowPercent = false bar.ShowTimeLeft = true bar.SetMaxWidth(80) } } for _, archive := range archives { if !s.Options.Quiet && !s.Options.Debug { bar.Increment() } if err := s.ProcessArchive(archive, h.Options.Output); err != nil { return err } } if !s.Options.Quiet && !s.Options.Debug { bar.Finish() fmt.Printf("[%s] Processed %v files (skipped: %v)\n", s.Infos.Name, s.Processed, s.Skipped) } fmt.Printf("[%s] Selected %v games\n", s.Infos.Name, len(s.Games)) return nil }
// getMetadataChanges streams batches of Drive change records that are newer
// than startChangeId into changeChan, closing the channel when all changes
// have been delivered. Any unrecoverable API error is sent on errorChan and
// the function returns without closing changeChan.
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64, changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error

	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		} else {
			// Retryable errors are cleared by the handler; anything else
			// survives into err and aborts below.
			err = gd.tryToHandleDriveAPIError(err, try)
		}
		if err != nil {
			errorChan <- err
			return
		}
	}

	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using timer.AfterFunc to put up the progress bar if
	// we're not done after a few seconds? It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}

	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields in the drive.Change structure that we
		// actually to be filled in to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken", "items/id", "items/fileId", "items/deleted", "items/file/id", "items/file/parents", "items/file/title", "items/file/fileSize", "items/file/mimeType", "items/file/properties", "items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		if startChangeId >= 0 {
			// Changes are requested starting strictly after the caller's
			// last-known change id.
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}

		r, err := q.Do()
		if err != nil {
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			try++
			continue
		}

		// Success. Reset the try counter in case we had errors leading up
		// to this.
		try = 0
		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items

			if bar != nil {
				// Use the id of the newest change in this batch as the
				// progress position relative to where we started.
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}

		pageToken = string(r.NextPageToken)
		if pageToken == "" {
			break
		}
	}

	// Signal that no more changes are coming.
	close(changeChan)

	if bar != nil {
		bar.Finish()
	}

	gd.debug("Done updating metadata from Drive")
}
func install(l, version string) error { var currentStep lang.Step var bar *pb.ProgressBar var process *Process fmt.Printf("Installing %s@%s\n", l, version) err := service.Install(l, version, binaryFlag, func(step lang.Step, progress, total int64) { if currentStep != step { if bar != nil { bar.NotPrint = true bar.Finish() fmt.Printf(ascii2.EraseLine) bar = nil } if process != nil { process.Done("") process = nil } if total > 0 { bar = pb.New64(total).Prefix(" " + stepToMsg(step) + "\t\t") bar.SetWidth(40) bar.ShowCounters = false //fmt.Printf("%s\n", step) //bar.NotPrint = true bar.Start() currentStep = step } else { process := &Process{Msg: stepToMsg(step) + "\t\t"} process.Start() } } if bar != nil { bar.Set64(progress) } }) if bar != nil { bar.NotPrint = true bar.Finish() fmt.Printf(ascii2.EraseLines(2) + ascii2.EraseLine + fmt.Sprintf(" %s installed", l)) } if process != nil { process.Done("\n") } //fmt.Printf(ascii2.EraseLine + ascii2.CursorUp(1) + ascii2.EraseLine) if err != nil { fmt.Printf("Could not install %s@%s: \n %s\n", l, version, err.Error()) } else { fmt.Printf(" %s@%s installed!\n\n", l, version) } return err }
func main() { flag.Usage = usage help := flag.Bool("help", false, "show this message") version := flag.Bool("version", false, "show version") failpath := flag.String("faildir", "", "dir where failed torrentzips should be copied") flag.Parse() if *help { flag.Usage() os.Exit(0) } if *version { fmt.Fprintf(os.Stdout, "%s version %s, Copyright (c) 2013 Uwe Hoffmann. All rights reserved.\n", os.Args[0], versionStr) os.Exit(0) } if *failpath == "" { flag.Usage() os.Exit(0) } cv := new(countVisitor) for _, name := range flag.Args() { fmt.Fprintf(os.Stdout, "initial scan of %s to determine amount of work\n", name) err := filepath.Walk(name, cv.visit) if err != nil { fmt.Fprintf(os.Stderr, "failed to count in dir %s: %v\n", name, err) os.Exit(1) } } mg := int(cv.numBytes / megabyte) fmt.Fprintf(os.Stdout, "found %d files and %d MB to do. starting work...\n", cv.numFiles, mg) var byteProgress *pb.ProgressBar if mg > 10 { pb.BarStart = "MB [" byteProgress = pb.New(mg) byteProgress.RefreshRate = 5 * time.Second byteProgress.ShowCounters = true byteProgress.Start() } inwork := make(chan *workUnit) sv := &scanVisitor{ inwork: inwork, } wg := new(sync.WaitGroup) wg.Add(cv.numFiles) for i := 0; i < 8; i++ { worker := &testWorker{ byteProgress: byteProgress, failpath: *failpath, inwork: inwork, wg: wg, } go worker.run() } for _, name := range flag.Args() { err := filepath.Walk(name, sv.visit) if err != nil { fmt.Fprintf(os.Stderr, "failed to scan dir %s: %v\n", name, err) os.Exit(1) } } wg.Wait() close(inwork) if byteProgress != nil { byteProgress.Set(int(byteProgress.Total)) byteProgress.Finish() } fmt.Fprintf(os.Stdout, "Done.\n") }
// Tear down the previous step's indicator before starting a new one.
bar.NotPrint = true
bar.Finish()
fmt.Printf(ascii2.EraseLine)
bar = nil
}
if process != nil {
	process.Done("")
	process = nil
}
if total > 0 {
	bar = pb.New64(total).Prefix(" " + stepToMsg(step) + "\t\t")
	bar.SetWidth(40)
	bar.ShowCounters = false
	//fmt.Printf("%s\n", step)
	//bar.NotPrint = true
	bar.Start()
} else {
	// BUG FIX: was `process := &Process{...}`, which shadowed the outer
	// `process` variable — the spinner was invisible to the teardown
	// code and could never be stopped.
	process = &Process{Msg: " " + stepToMsg(step) + "\t\t"}
	process.Start()
}
// BUG FIX: record the step for both branches (previously only set when
// total > 0), so a repeated callback for a total<=0 step does not spawn
// a fresh spinner each time.
currentStep = step
}
if bar != nil {
	bar.Set64(progress)
}