func StartClient(c chan string, bar *pb.ProgressBar) {
	readClient, writeClient := http.Client{}, http.Client{}
	for file := range c {
		file = escapeFile(file)
		read := readUrl + file
		write := writeUrl + file

		// get
		resp, err := readClient.Get(read)
		if err != nil {
			panic(err)
		}
		getMd5 := resp.Header.Get("X-Ae-Md5")
		length := resp.ContentLength

		// put
		req, err := http.NewRequest(*method, write, resp.Body)
		if err != nil {
			panic(err)
		}
		req.ContentLength = length
		wres, err := writeClient.Do(req)
		if err != nil {
			panic(err)
		}
		if wres.StatusCode != http.StatusConflict {
			putMd5 := wres.Header.Get("X-Ae-Md5")
			if putMd5 != getMd5 {
				fmt.Printf("ERROR! MD5 mismatch: %s vs %s (%s)\n", getMd5, putMd5, file)
			}
		}
		wres.Body.Close()
		resp.Body.Close()
		bar.Increment()
	}
}
func httpWorker(jobs <-chan int, hosts []host, bar *pb.ProgressBar) {
	for job := range jobs {
		h := &hosts[job]
		timeout, err := time.ParseDuration(*httpClientTimeout)
		if err != nil {
			log.Fatal(err)
		}
		client := http.Client{
			Timeout: timeout,
		}
		url := fmt.Sprintf("%s://%s:%d/xmldata?item=All", *scheme, hosts[job].ip.String(), *port)

		// Anonymous function so that wg.Done() and bar.Increment() run exactly once per job.
		func() {
			defer bar.Increment()
			defer wg.Done()
			res, err := client.Get(url)
			if err != nil {
				h.Err += err.Error()
				return
			}
			defer res.Body.Close()
			xmldata, err := ioutil.ReadAll(res.Body)
			if err != nil {
				h.Err += err.Error()
				return
			}
			h.XMLErr = xml.Unmarshal(xmldata, h)
		}()
	}
}
func (g *Game) getInitTweets(aliveNum int) {
	var progress *pb.ProgressBar
	if !g.debug {
		progress = pb.StartNew(aliveNum)
	}
	for y := 0; y < g.field.SizeY; y++ {
		for x := 0; x < g.field.SizeX; x++ {
			if g.field.Points[y][x].IsAlive {
				tweet := <-g.tweetCh
				if g.debug {
					emoji.Printf(":bird:Points[%v][%v]: %v\n", x, y, tweet)
				}
				g.field.Points[y][x].Str = tweet
				if !g.debug {
					progress.Increment()
				}
			}
		}
	}
	if g.debug {
		emoji.Println(":smile::smile::smile:Collected initial tweets:smile::smile::smile:")
	} else {
		e := emoji.Sprint(":smile::smile::smile:")
		progress.FinishPrint(e + "Collected initial tweets" + e)
	}
}
func work(wg *sync.WaitGroup, bar *pb.ProgressBar, queue chan *Track) {
	defer wg.Done()
	for track := range queue {
		track.Download()
		bar.Increment()
	}
}
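// Hypothetical wiring for the worker above (a sketch, not part of the original
// source): fan the tracks out to a fixed number of work goroutines that share
// one progress bar sized to the number of downloads. The worker count and the
// downloadAll name are illustrative assumptions.
func downloadAll(tracks []*Track) {
	bar := pb.StartNew(len(tracks))
	queue := make(chan *Track)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go work(&wg, bar, queue)
	}

	for _, t := range tracks {
		queue <- t
	}
	close(queue)

	wg.Wait()
	bar.Finish()
}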
func main() {
	var progress *pb.ProgressBar
	page := 0
	category := "bugs"
	for {
		printf("Get page: %d", page)
		tickets, _, err := sfClient.Tracker.Info(category)
		if err != nil {
			log.Fatal(err)
		}
		if ghMilestones == nil {
			createMilestones(tickets)
			getMilestones()
		}
		if progress == nil {
			log.Println("Creating tickets")
			progress = pb.StartNew(tickets.Count)
		}
		if len(tickets.Tickets) == 0 {
			break
		}
		for _, ticket := range tickets.Tickets {
			ticket, _, err := sfClient.Tracker.Get(category, ticket.TicketNum)
			if err != nil {
				log.Fatal(err)
			}
			sfTicketToGhIssue(ticket, category)
			progress.Increment()
			time.Sleep(time.Millisecond * sleepTime)
		}
		page++
	}
	progress.FinishPrint("All tickets imported")
}
func HostMerge(List []Host, ShowBar bool) []string {
	count := 0
	filterList := []string{""}
	length := len(List)
	var bar *pb.ProgressBar
	if ShowBar {
		bar = pb.StartNew(length)
		bar.SetMaxWidth(80)
	}
	for _, Host := range List {
		length = len(filterList[count])
		if length == 0 {
			filterList[count] = Host.Hostname
		} else if length+Host.length() <= 255 && length != 0 {
			filterList[count] += "|"
			filterList[count] += Host.Hostname
		} else {
			count++
			filterList = append(filterList, Host.Hostname)
			// filterList[count] = Ref.Referrer
		}
		if ShowBar {
			bar.Increment()
			time.Sleep(time.Millisecond * 50)
		}
	}
	if ShowBar {
		bar.Finish()
	}
	return filterList
}
func customizeBar(bar *pb.ProgressBar) {
	bar.ShowCounters = true
	bar.ShowTimeLeft = false
	bar.ShowSpeed = true
	bar.SetMaxWidth(80)
	bar.SetUnits(pb.U_BYTES)
}
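// A hedged usage sketch for customizeBar (not from the original source): create
// a bar for the expected byte count, apply the customizations, and route a copy
// through the bar's proxy reader. copyWithBar and its parameters are illustrative.
func copyWithBar(dst io.Writer, src io.Reader, total int64) error {
	bar := pb.New64(total)
	customizeBar(bar)
	bar.Start()
	defer bar.Finish()

	_, err := io.Copy(dst, bar.NewProxyReader(src))
	return err
}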
func main() {
	opts, args := parseFlags()

	conv := cbconvert.NewConvertor(opts)

	var bar *pb.ProgressBar

	c := make(chan os.Signal, 3)
	signal.Notify(c, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
	go func() {
		for range c {
			fmt.Fprintf(os.Stderr, "Aborting\n")
			os.RemoveAll(conv.Workdir)
			os.Exit(1)
		}
	}()

	if _, err := os.Stat(opts.Outdir); err != nil {
		os.MkdirAll(opts.Outdir, 0777)
	}

	files := conv.GetFiles(args)

	if opts.Cover || opts.Thumbnail {
		if !opts.Quiet {
			bar = pb.New(conv.Nfiles)
			bar.ShowTimeLeft = false
			bar.Start()
		}
	}

	for _, file := range files {
		stat, err := os.Stat(file)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error Stat: %v\n", err.Error())
			continue
		}
		if opts.Cover {
			conv.ExtractCover(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		} else if opts.Thumbnail {
			conv.ExtractThumbnail(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		}
		conv.ConvertComic(file, stat)
	}
}
// download fetches a file over HTTP or HTTPS, showing a progress bar. The
// destination file is always overwritten.
func download(rawurl string, destinationPath string) {
	tempDestinationPath := destinationPath + ".tmp"
	destination, err := os.Create(tempDestinationPath)
	if err != nil {
		log.Fatalf("Unable to open the destination file: %s", tempDestinationPath)
	}
	defer destination.Close()

	response, err := customGet(rawurl)
	if err != nil {
		log.Fatalf("Unable to open a connection to %s", rawurl)
	}
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP response code. Wanted 200 but got %d", response.StatusCode)
	}

	var progressBar *pb.ProgressBar
	contentLength, err := strconv.Atoi(response.Header.Get("Content-Length"))
	if err == nil {
		progressBar = pb.New(contentLength)
	} else {
		progressBar = pb.New(0)
	}
	defer progressBar.Finish()
	progressBar.ShowSpeed = true
	progressBar.SetRefreshRate(time.Millisecond * 1000)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Start()

	writer := io.MultiWriter(destination, progressBar)
	io.Copy(writer, response.Body)
	destination.Close()
	os.Rename(tempDestinationPath, destinationPath)
}
func runClusterBackup(args *docopt.Args) error {
	client, err := getClusterClient()
	if err != nil {
		return err
	}

	var bar *pb.ProgressBar
	var progress backup.ProgressBar
	if term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		progress = bar
	}

	var dest io.Writer = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		dest = f
	}

	fmt.Fprintln(os.Stderr, "Creating cluster backup...")
	if err := backup.Run(client, dest, progress); err != nil {
		return err
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Fprintln(os.Stderr, "Backup complete.")
	return nil
}
func scan(ips []string, out chan ILOInfo, bar *pb.ProgressBar, wg *sync.WaitGroup) {
	for _, host := range ips {
		if IsOpen(host, iloPort) {
			srvName := ""
			iloName := ""
			info, err := requestInfo(host)
			if err != nil {
				fmt.Println(err)
			}
			if match, _ := regexp.MatchString("iLO (3|4|5)", info.HW); match {
				srvName, iloName, _ = requestServerName(host)
			} else {
				srvName, iloName, _ = requestServerNameV2(host)
			}
			info.ServerName = srvName
			info.IloName = iloName
			out <- *info
		}
		bar.Increment()
	}
	wg.Done()
}
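// A possible driver for scan (an assumption, not in the original source): split
// the address list into per-worker chunks that share one bar and one WaitGroup,
// then drain the buffered result channel once every chunk has been scanned.
// scanAll and the workers parameter are illustrative names.
func scanAll(ips []string, workers int) []ILOInfo {
	bar := pb.StartNew(len(ips))
	out := make(chan ILOInfo, len(ips)) // buffered so workers never block on send
	var wg sync.WaitGroup

	chunk := (len(ips) + workers - 1) / workers
	for start := 0; start < len(ips); start += chunk {
		end := start + chunk
		if end > len(ips) {
			end = len(ips)
		}
		wg.Add(1)
		go scan(ips[start:end], out, bar, &wg)
	}

	wg.Wait()
	close(out)
	bar.Finish()

	infos := make([]ILOInfo, 0, len(ips))
	for info := range out {
		infos = append(infos, info)
	}
	return infos
}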
// processSystemArchives processes archives for the given system.
func (h *Harvester) processSystemArchives(s *system.System, archives []string) error {
	var bar *pb.ProgressBar
	nb := len(archives)

	// extract archives
	if !s.Options.Quiet {
		fmt.Printf("[%s] Extracting %v archive(s)\n", s.Infos.Name, nb)
		if !s.Options.Debug {
			bar = pb.StartNew(nb)
			bar.ShowCounters = true
			bar.ShowPercent = false
			bar.ShowTimeLeft = true
			bar.SetMaxWidth(80)
		}
	}

	for _, archive := range archives {
		if !s.Options.Quiet && !s.Options.Debug {
			bar.Increment()
		}
		if err := s.ProcessArchive(archive, h.Options.Output); err != nil {
			return err
		}
	}

	if !s.Options.Quiet && !s.Options.Debug {
		bar.Finish()
		fmt.Printf("[%s] Processed %v files (skipped: %v)\n", s.Infos.Name, s.Processed, s.Skipped)
	}
	fmt.Printf("[%s] Selected %v games\n", s.Infos.Name, len(s.Games))

	return nil
}
func uploadItem(c *cli.Context, hash string, item client.IndexItem, bar *pb.ProgressBar, ch chan error) {
	data, err := ioutil.ReadFile(item.Name)
	if err != nil {
		ch <- err
		return
	}

	for i := 0; i < 3; i++ {
		err = rackClient(c).IndexUpload(hash, data)
		if err != nil {
			continue
		}
		bar.Add(item.Size)
		ch <- nil
		return
	}

	ch <- fmt.Errorf("max 3 retries on upload")
}
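// A hypothetical caller for uploadItem (not part of the original source): one
// goroutine per index item, all reporting into a shared byte-unit bar, with the
// first failure returned once every upload has responded. uploadIndex and
// totalSize are illustrative names.
func uploadIndex(c *cli.Context, hash string, items []client.IndexItem, totalSize int) error {
	bar := pb.New(totalSize).SetUnits(pb.U_BYTES)
	bar.Start()
	defer bar.Finish()

	ch := make(chan error, len(items))
	for _, item := range items {
		go uploadItem(c, hash, item, bar, ch)
	}

	var firstErr error
	for range items {
		if err := <-ch; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}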
func indexAndSaveHits(ts *index.TokenSetSearcher, hits []HitInfo, idxs []int, saveFullHit func(*HitInfo) error) error {
	rank := 0
	var bar *pb.ProgressBar
	if terminal.IsTerminal(int(os.Stdout.Fd())) {
		bar = pb.New(len(idxs))
		bar.Start()
	}
	for i := range idxs {
		hit := &hits[idxs[i]]
		if i > 0 && hit.StaticScore < hits[idxs[i-1]].StaticScore {
			rank = i
		}
		hit.StaticRank = rank

		if err := saveFullHit(hit); err != nil {
			return err
		}

		var desc, readme string
		desc, hit.Description = hit.Description, ""
		readme, hit.ReadmeData = hit.ReadmeData, ""
		hit.Imported = nil
		hit.TestImported = nil

		var nameTokens stringsp.Set
		nameTokens = AppendTokens(nameTokens, []byte(hit.Name))

		var tokens stringsp.Set
		tokens.Add(nameTokens.Elements()...)
		tokens = AppendTokens(tokens, []byte(hit.Package))
		tokens = AppendTokens(tokens, []byte(desc))
		tokens = AppendTokens(tokens, []byte(readme))
		tokens = AppendTokens(tokens, []byte(hit.Author))
		for _, word := range hit.Exported {
			AppendTokens(tokens, []byte(word))
		}
		ts.AddDoc(map[string]stringsp.Set{
			IndexTextField: tokens,
			IndexNameField: nameTokens,
			IndexPkgField:  stringsp.NewSet(hit.Package),
		}, *hit)

		if bar != nil {
			bar.Increment()
		}
	}
	if bar != nil {
		bar.FinishPrint("Indexing finished!")
	}
	DumpMemStats()
	return nil
}
func StoreRedirects(db *sql.DB, redirs []wikidump.Redirect, bar *pb.ProgressBar) error {
	counts := make([]linkCount, 0)

	var titleId, old, del, delTitle, insTitle, ins, update *sql.Stmt
	tx, err := db.Begin()
	if err == nil {
		titleId, err = tx.Prepare(`select id from titles where title = ?`)
	}
	if err == nil {
		old, err = tx.Prepare(`select ngramhash, count from linkstats where targetid = ?`)
	}
	if err == nil {
		del, err = tx.Prepare(`delete from linkstats where targetid = ?`)
	}
	if err == nil {
		delTitle, err = tx.Prepare(`delete from titles where id = ?`)
	}
	if err == nil {
		insTitle, err = tx.Prepare(`insert or ignore into titles values (NULL, ?)`)
	}
	if err == nil {
		ins, err = tx.Prepare(`insert or ignore into linkstats values (?, (select id from titles where title = ?), 0)`)
	}
	if err == nil {
		update, err = tx.Prepare(`update linkstats set count = count + ? where targetid = (select id from titles where title = ?) and ngramhash = ?`)
	}
	if err != nil {
		return err
	}

	for _, r := range redirs {
		if bar != nil {
			bar.Increment()
		}

		var fromId int
		err := titleId.QueryRow(r.Title).Scan(&fromId)
		if err == sql.ErrNoRows {
			// No links to this redirect.
			continue
		} else if err != nil {
			return err
		}

		rows, err := old.Query(fromId)
		if err != nil {
			return err
		}

		// SQLite won't let us INSERT or UPDATE while doing a SELECT.
		for counts = counts[:0]; rows.Next(); {
			var count float64
			var hash int64
			rows.Scan(&hash, &count)
			counts = append(counts, linkCount{hash, count})
		}
		rows.Close()
		err = rows.Err()

		if err == nil {
			_, err = del.Exec(fromId)
		}
		if err == nil {
			_, err = delTitle.Exec(fromId)
		}
		if err != nil {
			return err
		}

		for _, c := range counts {
			if err == nil {
				_, err = insTitle.Exec(r.Target)
			}
			if err == nil {
				_, err = ins.Exec(c.hash, r.Target)
			}
			if err == nil {
				_, err = update.Exec(c.count, r.Target, c.hash)
			}
		}
		if err != nil {
			return err
		}
	}
	err = tx.Commit()
	return err
}
// PostMultipartP posts a multipart message in the MIME internet format with a
// callback function that receives a string describing the upload progress.
func (c *Client) PostMultipartP(path string, files map[string][]byte, params Params, out interface{}, callback func(s string)) error {
	body := &bytes.Buffer{}

	writer := multipart.NewWriter(body)

	for name, source := range files {
		part, err := writer.CreateFormFile(name, "source.tgz")
		if err != nil {
			return err
		}
		_, err = io.Copy(part, bytes.NewReader(source))
		if err != nil {
			return err
		}
	}

	for name, value := range params {
		writer.WriteField(name, value)
	}

	err := writer.Close()
	if err != nil {
		return err
	}

	var bodyReader io.Reader = body

	var bar *pb.ProgressBar
	if callback != nil {
		bar = pb.New(body.Len()).SetUnits(pb.U_BYTES)
		bar.NotPrint = true
		bar.ShowBar = false
		bar.Callback = callback
		bar.Start()
		bodyReader = bar.NewProxyReader(body)
	}

	req, err := c.request("POST", path, bodyReader)
	if err != nil {
		return err
	}

	req.SetBasicAuth("convox", string(c.Password))
	req.Header.Set("Content-Type", writer.FormDataContentType())

	res, err := c.client().Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()

	if err := responseError(res); err != nil {
		return err
	}

	data, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}

	if out != nil {
		err = json.Unmarshal(data, out)
		if err != nil {
			return err
		}
	}

	if callback != nil {
		bar.Finish()
	}

	return nil
}
func (ctx *Context) Load() error {
	var bar *pb.ProgressBar

	if Verbose {
		log.Println("loading database")
	}
	db, err := LoadDB(ctx.DatabaseName, *decompress)
	if os.IsNotExist(err) {
		log.Printf("database not found")
		return nil
	} else if err != nil {
		log.Printf("error loading database: %s", err)
		return err
	}

	if Verbose {
		log.Println("loading teams")
		bar = pb.StartNew(len(db.Teams))
	}
	for _, team := range db.Teams {
		ctx.AddTeam(team)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}

	if Verbose {
		log.Println("loading match history")
		bar = pb.StartNew(len(db.Outcomes))
	}
	loadedOutcomes := map[string]*Outcome{}
	for _, outcome := range db.Outcomes {
		loadedOutcomes[outcome.ID] = outcome
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}

	if Verbose {
		bar = pb.StartNew(len(db.Matches))
	}
	for _, match := range db.Matches {
		outcome, ok := loadedOutcomes[match.OutcomeID]
		if !ok {
			log.Panicf("corrupted history %q", match.ID)
		}
		ctx.AddMatch(match, outcome)
		if Verbose {
			bar.Increment()
		}
	}
	if Verbose {
		bar.Finish()
	}

	return nil
}
func install(l, version string) error {
	var currentStep lang.Step
	var bar *pb.ProgressBar
	var process *Process

	fmt.Printf("Installing %s@%s\n", l, version)

	err := service.Install(l, version, binaryFlag, func(step lang.Step, progress, total int64) {
		if currentStep != step {
			if bar != nil {
				bar.NotPrint = true
				bar.Finish()
				fmt.Printf(ascii2.EraseLine)
				bar = nil
			}
			if process != nil {
				process.Done("")
				process = nil
			}
			if total > 0 {
				bar = pb.New64(total).Prefix(" " + stepToMsg(step) + "\t\t")
				bar.SetWidth(40)
				bar.ShowCounters = false
				//fmt.Printf("%s\n", step)
				//bar.NotPrint = true
				bar.Start()
				currentStep = step
			} else {
				process = &Process{Msg: stepToMsg(step) + "\t\t"}
				process.Start()
			}
		}
		if bar != nil {
			bar.Set64(progress)
		}
	})

	if bar != nil {
		bar.NotPrint = true
		bar.Finish()
		fmt.Printf(ascii2.EraseLines(2) + ascii2.EraseLine + fmt.Sprintf(" %s installed", l))
	}
	if process != nil {
		process.Done("\n")
	}
	//fmt.Printf(ascii2.EraseLine + ascii2.CursorUp(1) + ascii2.EraseLine)
	if err != nil {
		fmt.Printf("Could not install %s@%s: \n %s\n", l, version, err.Error())
	} else {
		fmt.Printf(" %s@%s installed!\n\n", l, version)
	}
	return err
}
func scanWords(wordpath string, tupleLength int, makeLookup bool) (words map[string]int, tuples map[string]string, wordsLookup map[int]string, tuplesLookup map[string][]int) {
	totalLines := lineCount(wordpath)
	inFile, _ := os.Open(wordpath)
	defer inFile.Close()
	scanner := bufio.NewScanner(inFile)
	scanner.Split(bufio.ScanLines)

	// initialize
	words = make(map[string]int)
	tuples = make(map[string]string)
	wordsLookup = make(map[int]string)
	tuplesLookup = make(map[string][]int)
	numTuples := 0
	numWords := 0
	lineNum := 0

	var bar *pb.ProgressBar
	if VERBOSE {
		fmt.Println("Parsing subsets...")
		bar = pb.StartNew(totalLines)
	}
	for scanner.Scan() {
		if VERBOSE {
			bar.Increment()
		}
		lineNum++
		s := strings.TrimSpace(scanner.Text())
		if _, ok := words[s]; !ok {
			if makeLookup {
				wordsLookup[numWords] = s
			} else {
				words[s] = numWords
			}
			partials := getPartials(s, tupleLength)
			for i := 0; i < len(partials); i++ {
				_, ok := tuples[partials[i]]
				if makeLookup {
					_, ok = tuplesLookup[partials[i]]
				}
				if !ok {
					if makeLookup {
						tuplesLookup[partials[i]] = append([]int{}, numWords)
					} else {
						tuples[partials[i]] = strconv.Itoa(numWords)
					}
					numTuples++
				} else {
					if makeLookup {
						tuplesLookup[partials[i]] = append(tuplesLookup[partials[i]], numWords)
					} else {
						tuples[partials[i]] += " " + strconv.Itoa(numWords)
					}
				}
			}
			numWords++
		}
	}
	if VERBOSE {
		bar.FinishPrint("Finished parsing subsets")
	}
	return
}
func importCSV(filename string, connStr string, schema string, tableName string, ignoreErrors bool, skipHeader bool, fields string, delimiter string) error {
	db, err := connect(connStr, schema)
	if err != nil {
		return err
	}
	defer db.Close()

	var reader *csv.Reader
	var bar *pb.ProgressBar
	if filename != "" {
		file, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer file.Close()

		bar = NewProgressBar(file)
		reader = csv.NewReader(io.TeeReader(file, bar))
	} else {
		reader = csv.NewReader(os.Stdin)
	}

	reader.Comma, _ = utf8.DecodeRuneInString(delimiter)
	reader.LazyQuotes = true

	columns, err := parseColumns(reader, skipHeader, fields)
	if err != nil {
		return err
	}
	reader.FieldsPerRecord = len(columns)

	i, err := NewCSVImport(db, schema, tableName, columns)
	if err != nil {
		return err
	}

	var success, failed int
	if filename != "" {
		bar.Start()
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
		bar.Finish()
	} else {
		err, success, failed = copyCSVRows(i, reader, ignoreErrors, delimiter, columns)
	}

	if err != nil {
		lineNumber := success + failed
		if !skipHeader {
			lineNumber++
		}
		return fmt.Errorf("line %d: %s", lineNumber, err)
	}

	fmt.Printf("%d rows imported into %s.%s\n", success, schema, tableName)
	if ignoreErrors && failed > 0 {
		fmt.Printf("%d rows could not be imported into %s.%s and have been written to stderr.\n", failed, schema, tableName)
	}
	return i.Commit()
}
// Given a file on the local disk, synchronize it with Google Drive: if the
// corresponding file doesn't exist on Drive, it's created; if it exists
// but has different contents, the contents are updated. The Unix
// permissions and file modification time on Drive are also updated
// appropriately.
func syncFileUp(localPath string, stat os.FileInfo, drivePath string, encrypt bool, pb *pb.ProgressBar) error {
	debug.Printf("syncFileUp: %s -> %s", localPath, drivePath)

	// Get the *drive.File for the folder to create the new file in.
	// This folder should definitely exist at this point, since we
	// create all folders needed before starting to upload files.
	parentFolder, err := gd.GetFile(filepath.Dir(drivePath))
	if err != nil {
		panic(fmt.Sprintf("%s: get parent directory: %s", filepath.Dir(drivePath), err))
	}

	baseName := filepath.Base(drivePath)
	var driveFile *gdrive.File

	if stat.IsDir() {
		// We only get here if the folder doesn't exist at all on Drive; if
		// it already exists, we updated the metadata earlier (in
		// fileNeedsUpload) and don't go through this path.
		var proplist []gdrive.Property
		proplist = append(proplist, gdrive.Property{Key: "Permissions",
			Value: fmt.Sprintf("%#o", stat.Mode()&os.ModePerm)})
		driveFile, err = gd.CreateFolder(baseName, parentFolder, normalizeModTime(stat.ModTime()), proplist)
		checkFatalError(err, fmt.Sprintf("%s: create folder", drivePath))

		if pb != nil {
			pb.Increment()
		}
		atomic.AddInt64(&stats.UploadBytes, stat.Size())
		verbose.Printf("Created Google Drive folder %s", drivePath)
	} else {
		// We're uploading a file. Create an empty file on Google Drive if
		// it doesn't already exist.
		if driveFile, err = gd.GetFile(drivePath); err == gdrive.ErrNotExist {
			debug.Printf("%s doesn't exist on Drive. Creating", drivePath)

			var proplist []gdrive.Property
			if encrypt {
				// Compute a unique IV for the file.
				iv := getRandomBytes(aes.BlockSize)
				ivhex := hex.EncodeToString(iv)
				proplist = append(proplist, gdrive.Property{Key: "IV", Value: ivhex})
			}
			proplist = append(proplist, gdrive.Property{Key: "Permissions",
				Value: fmt.Sprintf("%#o", stat.Mode()&os.ModePerm)})
			// We explicitly set the modification time of the file to the
			// start of the Unix epoch, so that if the upload fails
			// partway through, then we won't later be confused about which
			// file is the correct one from having local and Drive copies
			// with the same time but different contents.
			driveFile, err = gd.CreateFile(baseName, parentFolder, time.Unix(0, 0), proplist)
			if err != nil {
				return err
			}
		}

		// And now upload the contents of the file, either overwriting the
		// contents of the existing file, or adding contents to the
		// just-created file.
		if err = uploadFileContents(localPath, driveFile, encrypt, pb); err != nil {
			return err
		}
	}

	verbose.Printf("Updated local %s -> Google Drive %s", localPath, drivePath)

	// Only update the modification time on Google Drive to match the local
	// modification time after the upload has finished successfully.
	return gd.UpdateModificationTime(driveFile, normalizeModTime(stat.ModTime()))
}
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}

	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}

	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if !f.Shared {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]", f.Title, f))
			}
		}
	})

	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]", f.Path, f))
	}

	if bar != nil {
		bar.Finish()
	}
	return nil
}
func (gd *GDrive) getMetadataChanges(svc *drive.Service, startChangeId int64,
	changeChan chan<- []*drive.Change, errorChan chan<- error) {
	var about *drive.About
	var err error

	// Get the Drive About information in order to figure out how many
	// changes we need to download to get up to date.
	for try := 0; ; try++ {
		about, err = svc.About.Get().Do()
		if err == nil {
			break
		} else {
			err = gd.tryToHandleDriveAPIError(err, try)
		}
		if err != nil {
			errorChan <- err
			return
		}
	}

	// Don't clutter the output with a progress bar unless it looks like
	// downloading changes may take a while.
	// TODO: consider using timer.AfterFunc to put up the progress bar if
	// we're not done after a few seconds? It's not clear if this is worth
	// the trouble.
	var bar *pb.ProgressBar
	numChanges := about.LargestChangeId - startChangeId
	if numChanges > 1000 && !gd.quiet {
		bar = pb.New64(numChanges)
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Updating metadata cache: ")
		bar.Start()
	}

	pageToken := ""
	try := 0
	// Keep asking Drive for more changes until we get through them all.
	for {
		// Only ask for the fields in the drive.Change structure that we
		// actually need to be filled in, to save some bandwidth...
		fields := []googleapi.Field{"nextPageToken", "items/id", "items/fileId",
			"items/deleted", "items/file/id", "items/file/parents", "items/file/title",
			"items/file/fileSize", "items/file/mimeType", "items/file/properties",
			"items/file/modifiedDate", "items/file/md5Checksum", "items/file/labels"}
		q := svc.Changes.List().MaxResults(1000).IncludeSubscribed(false).Fields(fields...)
		if startChangeId >= 0 {
			q = q.StartChangeId(startChangeId + 1)
		}
		if pageToken != "" {
			q = q.PageToken(pageToken)
		}

		r, err := q.Do()
		if err != nil {
			err = gd.tryToHandleDriveAPIError(err, try)
			if err != nil {
				errorChan <- err
				return
			}
			try++
			continue
		}

		// Success. Reset the try counter in case we had errors leading up
		// to this.
		try = 0

		if len(r.Items) > 0 {
			// Send the changes along to the goroutine that's updating the
			// local cache.
			changeChan <- r.Items

			if bar != nil {
				bar.Set(int(r.Items[len(r.Items)-1].Id - startChangeId))
			}
		}

		pageToken = string(r.NextPageToken)
		if pageToken == "" {
			break
		}
	}

	// Signal that no more changes are coming.
	close(changeChan)

	if bar != nil {
		bar.Finish()
	}
	gd.debug("Done updating metadata from Drive")
}
// FetchHTTPFile fetches url over HTTP/HTTPS into the destination file dest,
// optionally showing a progress bar.
func FetchHTTPFile(url string, dest string, progress bool) (err error) {
	gologit.Debugf("Creating file: %s\n", dest)
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	var r io.Reader
	gologit.Debugf("Fetching url: %s\n", url)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned non-200 status: %v", resp.Status)
	}

	msgPrefix := fmt.Sprintf("%s: ", path.Base(dest))
	var bar *pb.ProgressBar
	i, _ := strconv.Atoi(resp.Header.Get("Content-Length"))
	if i > 0 && progress {
		bar = pb.New(i).Prefix(msgPrefix).SetUnits(pb.U_BYTES)
		bar.ShowSpeed = true
		bar.RefreshRate = time.Millisecond * 700
		bar.ShowFinalTime = false
		bar.ShowTimeLeft = false
		bar.Start()
		defer bar.Finish()
		r = bar.NewProxyReader(resp.Body)
	} else {
		r = resp.Body
	}

	_, err = io.Copy(out, r)
	return err
}
func dumpToBoltDB(path string, words map[string]int, tuples map[string]string, tupleLength int) {
	var bar *pb.ProgressBar
	var start time.Time

	wordBuckets := int(len(words) / 600)
	if wordBuckets < 10 {
		wordBuckets = 10
	}
	if VERBOSE {
		fmt.Printf("Creating %v word buckets\n", wordBuckets)
	}

	if _, err := os.Stat(path); err == nil {
		os.Remove(path)
		if VERBOSE {
			fmt.Println("Removed old " + path)
		}
	}

	// Open a new bolt database
	db, err := bolt.Open(path, 0600, &bolt.Options{NoGrowSync: false})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if VERBOSE {
		fmt.Println("Creating subset buckets...")
		bar = pb.StartNew(len(tuples))
		start = time.Now()
	}
	err = db.Batch(func(tx *bolt.Tx) error {
		for k := range tuples {
			if VERBOSE {
				bar.Increment()
			}
			firstLetter := string(k[0])
			secondLetter := string(k[1])
			if strings.Contains(alphabet, firstLetter) && strings.Contains(alphabet, secondLetter) {
				_, err := tx.CreateBucketIfNotExists([]byte("tuples-" + firstLetter + secondLetter))
				if err != nil {
					return fmt.Errorf("create bucket: %s", err)
				}
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Creating subset buckets took " + elapsed.String())
	}

	db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("tuples"))
		if err != nil {
			return fmt.Errorf("create bucket: %s", err)
		}
		return nil
	})

	if VERBOSE {
		fmt.Println("Creating words buckets...")
	}
	db.Batch(func(tx *bolt.Tx) error {
		for i := 0; i < wordBuckets; i++ {
			_, err := tx.CreateBucket([]byte("words-" + strconv.Itoa(i)))
			if err != nil {
				return fmt.Errorf("create bucket: %s", err)
			}
		}
		return nil
	})

	db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket([]byte("vars"))
		if err != nil {
			return fmt.Errorf("create bucket: %s", err)
		}
		return nil
	})

	// fmt.Printf("INSERT INTO words (id,word) values (%v,'%v');\n", v, k)
	if VERBOSE {
		fmt.Println("Loading words into db...")
		start = time.Now()
		bar = pb.StartNew(len(words))
	}
	err = db.Batch(func(tx *bolt.Tx) error {
		for k, v := range words {
			if VERBOSE {
				bar.Increment()
			}
			if len(k) > 0 {
				b := tx.Bucket([]byte("words-" + strconv.Itoa(int(math.Mod(float64(v), float64(wordBuckets))))))
				b.Put([]byte(strconv.Itoa(v)), []byte(k))
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Words took " + elapsed.String())
	}

	if VERBOSE {
		fmt.Println("Loading subsets into db...")
		start = time.Now()
		bar = pb.StartNew(len(tuples))
	}
	err = db.Update(func(tx *bolt.Tx) error {
		for k, v := range tuples {
			if VERBOSE {
				bar.Increment()
			}
			firstLetter := string(k[0])
			secondLetter := string(k[1])
			if strings.Contains(alphabet, firstLetter) && strings.Contains(alphabet, secondLetter) {
				b := tx.Bucket([]byte("tuples-" + firstLetter + secondLetter))
				b.Put([]byte(k), []byte(v))
			} else {
				b := tx.Bucket([]byte("tuples"))
				b.Put([]byte(k), []byte(v))
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
		// BUG(schollz): Windows file resize error: https://github.com/schollz/goagrep/issues/6
	}
	if VERBOSE {
		elapsed := time.Since(start)
		bar.FinishPrint("Subsets took " + elapsed.String())
	}

	db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("vars"))
		err := b.Put([]byte("tupleLength"), []byte(strconv.Itoa(tupleLength)))
		return err
	})

	db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("vars"))
		err := b.Put([]byte("wordBuckets"), []byte(strconv.Itoa(wordBuckets)))
		return err
	})
}
func main() {
	clientID := flag.String("id", "", "Github client ID")
	clientSecret := flag.String("secret", "", "Github client secret")
	file := flag.String("file", "", "File containing the list of packages")
	output := flag.String("output", "gddofork.out", "Output file")
	progress := flag.Bool("progress", false, "Show a progress bar")
	flag.Parse()

	var auth *gddoexp.GithubAuth
	if (clientID != nil && *clientID != "") || (clientSecret != nil && *clientSecret != "") {
		if *clientID == "" || *clientSecret == "" {
			fmt.Println("to enable GitHub authentication, you must provide both the id and the secret")
			flag.PrintDefaults()
			return
		}
		auth = &gddoexp.GithubAuth{
			ID:     *clientID,
			Secret: *clientSecret,
		}
	}

	var pkgs []database.Package
	var err error
	if file != nil && *file != "" {
		pkgs, err = readFromFile(*file)
	} else {
		pkgs, err = readFromStdin()
	}
	if err != nil {
		fmt.Println(err)
		return
	}

	o, err := os.OpenFile(*output, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		fmt.Println("error creating output file:", err)
		return
	}
	defer o.Close()
	log.SetOutput(o)
	log.Println("BEGIN")
	log.Printf("%d packages will be analyzed", len(pkgs))

	var progressBar *pb.ProgressBar
	if progress != nil && *progress {
		progressBar = pb.StartNew(len(pkgs))
	}

	var cache int
	for response := range gddoexp.AreFastForkPackages(pkgs, auth) {
		if progress != nil && *progress {
			progressBar.Increment()
		}

		if response.Cache {
			cache++
		}

		if response.Error != nil {
			log.Println(response.Error)
		} else if response.FastFork {
			log.Printf("package “%s” is a fast fork\n", response.Path)
			if progress != nil && !*progress {
				fmt.Println(response.Path)
			}
		} else {
			log.Printf("package “%s” is not a fast fork\n", response.Path)
		}
	}

	if progress != nil && *progress {
		progressBar.Finish()
	}

	log.Println("Cache hits:", cache)
	log.Println("END")
}
// uploadFileContents does its best to upload the local file stored at
// localPath to the given *drive.File on Google Drive. (It assumes that
// the *drive.File has already been created.)
func uploadFileContents(localPath string, driveFile *gdrive.File, encrypt bool, pb *pb.ProgressBar) error {
	var iv []byte
	var err error
	if encrypt {
		iv, err = getInitializationVector(driveFile)
		if err != nil {
			return fmt.Errorf("unable to get IV: %v", err)
		}
	}

	for try := 0; ; try++ {
		contentsReader, length, err := getFileContentsReaderForUpload(localPath, encrypt, iv)
		if contentsReader != nil {
			defer contentsReader.Close()
		}
		if err != nil {
			return err
		}

		// Keep track of how many bytes are uploaded in case we fail
		// part-way through and need to roll back the progress bar.
		countingReader := &byteCountingReader{R: contentsReader}

		// Also tee reads to the progress bar as they are done so that it
		// stays in sync with how much data has been transmitted.
		var uploadReader io.Reader
		if pb != nil {
			uploadReader = io.TeeReader(countingReader, pb)
		} else {
			uploadReader = countingReader
		}

		if length >= resumableUploadMinSize {
			err = gd.UploadFileContentsResumable(driveFile, uploadReader, length)
		} else {
			err = gd.UploadFileContents(driveFile, uploadReader, length, try)
		}
		atomic.AddInt64(&stats.DiskReadBytes, countingReader.bytesRead)

		if err == nil {
			// Success!
			atomic.AddInt64(&stats.DriveFilesUpdated, 1)
			atomic.AddInt64(&stats.UploadBytes, length)
			return nil
		}

		// The "progress" made so far on this file should be rolled back;
		// if we don't do this, when retries happen, we end up going over
		// 100% progress...
		if pb != nil {
			pb.Add64(-countingReader.bytesRead)
		}

		if re, ok := err.(gdrive.RetryHTTPTransmitError); ok && try < 5 {
			debug.Printf("%s: got retry http error--retrying: %s", localPath, re.Error())
		} else {
			debug.Printf("%s: giving up due to error: %v", localPath, err)
			// We're giving up on this file, so subtract its length from
			// what the progress bar is expecting.
			if pb != nil {
				pb.Total -= length
			}
			return err
		}
	}
}
func main() {
	flag.Usage = usage

	help := flag.Bool("help", false, "show this message")
	version := flag.Bool("version", false, "show version")
	failpath := flag.String("faildir", "", "dir where failed torrentzips should be copied")

	flag.Parse()

	if *help {
		flag.Usage()
		os.Exit(0)
	}

	if *version {
		fmt.Fprintf(os.Stdout, "%s version %s, Copyright (c) 2013 Uwe Hoffmann. All rights reserved.\n", os.Args[0], versionStr)
		os.Exit(0)
	}

	if *failpath == "" {
		flag.Usage()
		os.Exit(0)
	}

	cv := new(countVisitor)

	for _, name := range flag.Args() {
		fmt.Fprintf(os.Stdout, "initial scan of %s to determine amount of work\n", name)

		err := filepath.Walk(name, cv.visit)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to count in dir %s: %v\n", name, err)
			os.Exit(1)
		}
	}

	mg := int(cv.numBytes / megabyte)

	fmt.Fprintf(os.Stdout, "found %d files and %d MB to do. starting work...\n", cv.numFiles, mg)

	var byteProgress *pb.ProgressBar

	if mg > 10 {
		pb.BarStart = "MB ["

		byteProgress = pb.New(mg)
		byteProgress.RefreshRate = 5 * time.Second
		byteProgress.ShowCounters = true
		byteProgress.Start()
	}

	inwork := make(chan *workUnit)

	sv := &scanVisitor{
		inwork: inwork,
	}

	wg := new(sync.WaitGroup)
	wg.Add(cv.numFiles)

	for i := 0; i < 8; i++ {
		worker := &testWorker{
			byteProgress: byteProgress,
			failpath:     *failpath,
			inwork:       inwork,
			wg:           wg,
		}

		go worker.run()
	}

	for _, name := range flag.Args() {
		err := filepath.Walk(name, sv.visit)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to scan dir %s: %v\n", name, err)
			os.Exit(1)
		}
	}

	wg.Wait()
	close(inwork)

	if byteProgress != nil {
		byteProgress.Set(int(byteProgress.Total))
		byteProgress.Finish()
	}

	fmt.Fprintf(os.Stdout, "Done.\n")
}
func runImport(args *docopt.Args, client controller.Client) error {
	var src io.Reader = os.Stdin
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return fmt.Errorf("error opening export file: %s", err)
		}
		defer f.Close()
		src = f
	}
	tr := tar.NewReader(src)

	var (
		app           *ct.App
		release       *ct.Release
		imageArtifact *ct.Artifact
		formation     *ct.Formation
		routes        []router.Route
		slug          io.Reader
		dockerImage   struct {
			config struct {
				Tag string `json:"tag"`
			}
			archive io.Reader
		}
		pgDump     io.Reader
		mysqlDump  io.Reader
		uploadSize int64
	)
	numResources := 0
	numRoutes := 1

	for {
		header, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return fmt.Errorf("error reading export tar: %s", err)
		}

		switch path.Base(header.Name) {
		case "app.json":
			app = &ct.App{}
			if err := json.NewDecoder(tr).Decode(app); err != nil {
				return fmt.Errorf("error decoding app: %s", err)
			}
			app.ID = ""
		case "release.json":
			release = &ct.Release{}
			if err := json.NewDecoder(tr).Decode(release); err != nil {
				return fmt.Errorf("error decoding release: %s", err)
			}
			release.ID = ""
			release.ArtifactIDs = nil
		case "artifact.json":
			imageArtifact = &ct.Artifact{}
			if err := json.NewDecoder(tr).Decode(imageArtifact); err != nil {
				return fmt.Errorf("error decoding image artifact: %s", err)
			}
			imageArtifact.ID = ""
		case "formation.json":
			formation = &ct.Formation{}
			if err := json.NewDecoder(tr).Decode(formation); err != nil {
				return fmt.Errorf("error decoding formation: %s", err)
			}
			formation.AppID = ""
			formation.ReleaseID = ""
		case "routes.json":
			if err := json.NewDecoder(tr).Decode(&routes); err != nil {
				return fmt.Errorf("error decoding routes: %s", err)
			}
			for _, route := range routes {
				route.ID = ""
				route.ParentRef = ""
			}
		case "slug.tar.gz":
			f, err := ioutil.TempFile("", "slug.tar.gz")
			if err != nil {
				return fmt.Errorf("error creating slug tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading slug: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking slug tempfile: %s", err)
			}
			slug = f
			uploadSize += header.Size
		case "docker-image.json":
			if err := json.NewDecoder(tr).Decode(&dockerImage.config); err != nil {
				return fmt.Errorf("error decoding docker image json: %s", err)
			}
		case "docker-image.tar":
			f, err := ioutil.TempFile("", "docker-image.tar")
			if err != nil {
				return fmt.Errorf("error creating docker image tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading docker image: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking docker image tempfile: %s", err)
			}
			dockerImage.archive = f
			uploadSize += header.Size
		case "postgres.dump":
			f, err := ioutil.TempFile("", "postgres.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			pgDump = f
			uploadSize += header.Size
		case "mysql.dump":
			f, err := ioutil.TempFile("", "mysql.dump")
			if err != nil {
				return fmt.Errorf("error creating db tempfile: %s", err)
			}
			defer f.Close()
			defer os.Remove(f.Name())
			if _, err := io.Copy(f, tr); err != nil {
				return fmt.Errorf("error reading db dump: %s", err)
			}
			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
				return fmt.Errorf("error seeking db tempfile: %s", err)
			}
			mysqlDump = f
			uploadSize += header.Size
		}
	}

	if app == nil {
		return fmt.Errorf("missing app.json")
	}
	oldName := app.Name
	if name := args.String["--name"]; name != "" {
		app.Name = name
	}
	if err := client.CreateApp(app); err != nil {
		return fmt.Errorf("error creating app: %s", err)
	}

	var bar *pb.ProgressBar
	if !args.Bool["--quiet"] && uploadSize > 0 && term.IsTerminal(os.Stderr.Fd()) {
		bar = pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.Total = uploadSize
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
	}

	if pgDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "postgres",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning postgres resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getPgRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting postgres config: %s", err)
		}
		config.Stdin = pgDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := pgRestore(client, config); err != nil {
			return fmt.Errorf("error restoring postgres database: %s", err)
		}
	}

	if mysqlDump != nil && release != nil {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "mysql",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning mysql resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}

		config, err := getMysqlRunConfig(client, app.ID, release)
		if err != nil {
			return fmt.Errorf("error getting mysql config: %s", err)
		}
		config.Stdin = mysqlDump
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		config.Exit = false
		if err := mysqlRestore(client, config); err != nil {
			return fmt.Errorf("error restoring mysql database: %s", err)
		}
	}

	if release != nil && release.Env["FLYNN_REDIS"] != "" {
		res, err := client.ProvisionResource(&ct.ResourceReq{
			ProviderID: "redis",
			Apps:       []string{app.ID},
		})
		if err != nil {
			return fmt.Errorf("error provisioning redis resource: %s", err)
		}
		numResources++

		if release.Env == nil {
			release.Env = make(map[string]string, len(res.Env))
		}
		for k, v := range res.Env {
			release.Env[k] = v
		}
	}

	uploadSlug := release != nil && imageArtifact != nil && slug != nil

	if uploadSlug {
		// Use current slugrunner as the artifact
		gitreceiveRelease, err := client.GetAppRelease("gitreceive")
		if err != nil {
			return fmt.Errorf("unable to retrieve gitreceive release: %s", err)
		}
		if id, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_ID"]; ok {
			imageArtifact, err = client.GetArtifact(id)
			if err != nil {
				return fmt.Errorf("unable to get slugrunner image artifact: %s", err)
			}
		} else if uri, ok := gitreceiveRelease.Env["SLUGRUNNER_IMAGE_URI"]; ok {
			imageArtifact = &ct.Artifact{
				Type: host.ArtifactTypeDocker,
				URI:  uri,
			}
		} else {
			return fmt.Errorf("gitreceive env missing slug runner image")
		}
	}

	if dockerImage.config.Tag != "" && dockerImage.archive != nil {
		// load the docker image into the Docker daemon
		cmd := exec.Command("docker", "load")
		cmd.Stdin = dockerImage.archive
		if out, err := cmd.CombinedOutput(); err != nil {
			return fmt.Errorf("error running docker load: %s: %q", err, out)
		}

		// use the tag from the config (which will now be applied to
		// the loaded image) to push the image to docker-receive
		cluster, err := getCluster()
		if err != nil {
			return err
		}
		host, err := cluster.DockerPushHost()
		if err != nil {
			return err
		}
		tag := fmt.Sprintf("%s/%s:latest", host, app.Name)
		if out, err := exec.Command("docker", "tag", "--force", dockerImage.config.Tag, tag).CombinedOutput(); err != nil {
			return fmt.Errorf("error tagging docker image: %s: %q", err, out)
		}

		artifact, err := dockerPush(client, app.Name, tag)
		if err != nil {
			return fmt.Errorf("error pushing docker image: %s", err)
		}

		release.ArtifactIDs = []string{artifact.ID}
	} else if imageArtifact != nil {
		if imageArtifact.ID == "" {
			if err := client.CreateArtifact(imageArtifact); err != nil {
				return fmt.Errorf("error creating image artifact: %s", err)
			}
		}
		release.ArtifactIDs = []string{imageArtifact.ID}
	}

	if release != nil {
		for t, proc := range release.Processes {
			for i, port := range proc.Ports {
				if port.Service != nil && strings.HasPrefix(port.Service.Name, oldName) {
					proc.Ports[i].Service.Name = strings.Replace(port.Service.Name, oldName, app.Name, 1)
				}
			}
			release.Processes[t] = proc
		}
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if uploadSlug {
		slugURI := fmt.Sprintf("http://blobstore.discoverd/%s/slug.tgz", random.UUID())
		config := runConfig{
			App:        app.ID,
			Release:    release.ID,
			DisableLog: true,
			Args:       []string{"curl", "--request", "PUT", "--upload-file", "-", slugURI},
			Stdin:      slug,
			Stdout:     ioutil.Discard,
			Stderr:     ioutil.Discard,
		}
		if bar != nil {
			config.Stdin = bar.NewProxyReader(config.Stdin)
		}
		if err := runJob(client, config); err != nil {
			return fmt.Errorf("error uploading slug: %s", err)
		}
		slugArtifact := &ct.Artifact{
			Type: host.ArtifactTypeFile,
			URI:  slugURI,
		}
		if err := client.CreateArtifact(slugArtifact); err != nil {
			return fmt.Errorf("error creating slug artifact: %s", err)
		}
		release.ID = ""
		release.ArtifactIDs = append(release.ArtifactIDs, slugArtifact.ID)
		if release.Meta == nil {
			release.Meta = make(map[string]string, 1)
		}
		release.Meta["git"] = "true"
		if err := client.CreateRelease(release); err != nil {
			return fmt.Errorf("error creating release: %s", err)
		}
		if err := client.SetAppRelease(app.ID, release.ID); err != nil {
			return fmt.Errorf("error setting app release: %s", err)
		}
	}

	if formation != nil && release != nil {
		formation.ReleaseID = release.ID
		formation.AppID = app.ID
		if err := client.PutFormation(formation); err != nil {
			return fmt.Errorf("error creating formation: %s", err)
		}
	}

	if args.Bool["--routes"] {
		for _, route := range routes {
			if err := client.CreateRoute(app.ID, &route); err != nil {
				if e, ok := err.(hh.JSONError); ok && e.Code == hh.ConflictErrorCode {
					// If the cluster domain matches then the default route
					// exported will conflict with the one created automatically.
					continue
				}
				return fmt.Errorf("error creating route: %s", err)
			}
			numRoutes++
		}
	}

	fmt.Printf("Imported %s (added %d routes, provisioned %d resources)\n", app.Name, numRoutes, numResources)

	return nil
}
// Synchronize a local directory hierarchy with Google Drive.
// localPath is the file or directory to start with, driveRoot is
// the directory into which the file/directory will be sent.
func syncHierarchyUp(localPath string, driveRoot string, encrypt bool, trustTimes bool, maxSymlinkDepth int) int {
	if encrypt && key == nil {
		key = decryptEncryptionKey()
	}

	fileMappings, nUploadErrors := compileUploadFileTree(localPath, driveRoot, encrypt, trustTimes, maxSymlinkDepth)
	if len(fileMappings) == 0 {
		message("No files to be uploaded.")
		return 0
	}

	nBytesToUpload := int64(0)
	for _, info := range fileMappings {
		if !info.LocalFileInfo.IsDir() {
			nBytesToUpload += info.LocalFileInfo.Size()
		}
	}

	// Given the list of files to sync, first find all of the directories and
	// then either get or create a Drive folder for each one.
	directoryMappingMap := make(map[string]localToRemoteFileMapping)
	var directoryNames []string
	for _, localfile := range fileMappings {
		if localfile.LocalFileInfo.IsDir() {
			directoryNames = append(directoryNames, localfile.DrivePath)
			directoryMappingMap[localfile.DrivePath] = localfile
		}
	}

	// Now sort the directories by name, which ensures that the parent of each
	// directory has already been created if we need to create its children.
	sort.Strings(directoryNames)

	if len(directoryNames) > 0 {
		// Actually create/update the directories.
		var dirProgressBar *pb.ProgressBar
		if !quiet {
			dirProgressBar = pb.New(len(directoryNames))
			dirProgressBar.Output = os.Stderr
			dirProgressBar.Prefix("Directories: ")
			dirProgressBar.Start()
		}

		// Sync each of the directories, which serves to create any missing ones.
		for _, dirName := range directoryNames {
			file := directoryMappingMap[dirName]
			err := syncFileUp(file.LocalPath, file.LocalFileInfo, file.DrivePath, encrypt, dirProgressBar)
			if err != nil {
				// Errors creating directories are basically unrecoverable,
				// as they'll prevent us from later uploading any files in
				// them.
				printErrorAndExit(err)
			}
		}
		if dirProgressBar != nil {
			dirProgressBar.Finish()
		}
	}

	var fileProgressBar *pb.ProgressBar
	if !quiet {
		fileProgressBar = pb.New64(nBytesToUpload).SetUnits(pb.U_BYTES)
		fileProgressBar.Output = os.Stderr
		fileProgressBar.Prefix("Files: ")
		fileProgressBar.Start()
	}

	// Sort the files by size, small to large.
	sort.Sort(localToRemoteBySize(fileMappings))

	// The two indices uploadFrontIndex and uploadBackIndex point to the
	// range of elements in the fileMappings array that haven't yet been
	// uploaded.
	uploadFrontIndex := 0
	uploadBackIndex := len(fileMappings) - 1

	// First, upload any large files that will use the resumable upload
	// protocol using a single thread; more threads here doesn't generally
	// help improve bandwidth utilization and seems to make rate limit
	// errors from the Drive API more frequent...
	for ; uploadBackIndex >= 0; uploadBackIndex-- {
		if fileMappings[uploadBackIndex].LocalFileInfo.Size() < resumableUploadMinSize {
			break
		}

		fm := fileMappings[uploadBackIndex]
		if fm.LocalFileInfo.IsDir() {
			continue
		}

		if err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt, fileProgressBar); err != nil {
			addErrorAndPrintMessage(&nUploadErrors, fm.LocalPath, err)
		}
	}

	// Upload worker threads send a value over this channel when
	// they're done; the code that launches them waits for all of them
	// to do so before returning.
	doneChan := make(chan int, nWorkers)

	// Now that multiple threads are running, we need a mutex to protect
	// access to uploadFrontIndex and uploadBackIndex.
	var uploadIndexMutex sync.Mutex

	// All but one of the upload threads will grab files to upload starting
	// from the beginning of the fileMappings array, thus doing the smallest
	// files first; one thread starts from the back of the array, doing the
	// largest files first. In this way, the large files help saturate the
	// available upload bandwidth and hide the fixed overhead of creating
	// the smaller files.
	uploadWorker := func(startFromFront bool) {
		for {
			uploadIndexMutex.Lock()
			if uploadFrontIndex > uploadBackIndex {
				// All files have been uploaded.
				debug.Printf("All files uploaded [%d,%d]; exiting", uploadFrontIndex, uploadBackIndex)
				uploadIndexMutex.Unlock()
				doneChan <- 1
				break
			}

			// Get the index into fileMappings for the next file this
			// worker should upload.
			var index int
			if startFromFront {
				index = uploadFrontIndex
				uploadFrontIndex++
			} else {
				index = uploadBackIndex
				uploadBackIndex--
			}
			uploadIndexMutex.Unlock()

			fm := fileMappings[index]
			if fm.LocalFileInfo.IsDir() {
				// Directories have already been taken care of.
				continue
			}

			err := syncFileUp(fm.LocalPath, fm.LocalFileInfo, fm.DrivePath, encrypt, fileProgressBar)
			if err != nil {
				atomic.AddInt32(&nUploadErrors, 1)
				fmt.Fprintf(os.Stderr, "\nskicka: %s: %v\n", fm.LocalPath, err)
			}
		}
	}

	// Launch the workers.
	for i := 0; i < nWorkers; i++ {
		// All workers except the first one start from the front of
		// the array.
		go uploadWorker(i != 0)
	}
	// Wait for all of the workers to finish.
	for i := 0; i < nWorkers; i++ {
		<-doneChan
	}

	if fileProgressBar != nil {
		fileProgressBar.Finish()
	}

	if nUploadErrors > 0 {
		fmt.Fprintf(os.Stderr, "skicka: %d files not uploaded due to errors. "+
			"This may be a transient failure; try uploading again.\n", nUploadErrors)
	}
	return int(nUploadErrors)
}