func Example_multiple() {
	// create bars
	first := pb.New(200).Prefix("First ")
	second := pb.New(200).Prefix("Second ")
	third := pb.New(200).Prefix("Third ")

	// start pool
	pool, err := pb.StartPool(first, second, third)
	if err != nil {
		panic(err)
	}

	// update bars
	wg := new(sync.WaitGroup)
	for _, bar := range []*pb.ProgressBar{first, second, third} {
		wg.Add(1)
		go func(cb *pb.ProgressBar) {
			for n := 0; n < 200; n++ {
				cb.Increment()
				time.Sleep(time.Millisecond * time.Duration(rand.Intn(100)))
			}
			cb.Finish()
			wg.Done()
		}(bar)
	}
	wg.Wait()

	// close pool
	pool.Stop()
}
// SetupLog sets up initial ConnectionLog
func SetupLog(length, noOfConn int) error {
	connLog.stats = make([]ConnectionStat, noOfConn)
	barArray := make([]*pb.ProgressBar, noOfConn+1)
	lenSub := length / noOfConn
	for i := 0; i < noOfConn; i++ {
		fileBegin := lenSub * i
		fileEnd := lenSub * (i + 1)
		if i == noOfConn-1 {
			fileEnd = length
		}
		bar := pb.New(fileEnd - fileBegin).Prefix("Connection " + strconv.Itoa(i+1) + " ")
		customizeBar(bar)
		connLog.stats[i] = ConnectionStat{connectionIndex: i, pbar: bar}
		barArray[i] = bar
	}
	bar := pb.New(length).Prefix("Total ")
	customizeBar(bar)
	connLog.totalbar = bar
	barArray[noOfConn] = bar
	var err error
	connLog.pool, err = pb.StartPool(barArray...)
	if err != nil {
		return err
	}
	return nil
}
// Update replaces the inode of the current executable with the latest version
// n.b. this won't work on Windows
func (u *Updater) Update() error {
	u.l.Infoln("Downloading version", u.ServerVersion.Version)

	werckerPath, err := filepath.Abs(os.Args[0])
	if err != nil {
		return err
	}

	// Put new version in tempfile in parent directory.
	temp, err := ioutil.TempFile(filepath.Dir(werckerPath), fmt.Sprintf(".%s-", u.ServerVersion.Version))
	if err != nil {
		return err
	}
	defer temp.Close()

	newVersion, err := http.Get(u.DownloadURL())
	if err != nil {
		return err
	}
	defer newVersion.Body.Close()

	bar := pb.New(int(newVersion.ContentLength)).SetUnits(pb.U_BYTES)
	bar.Start()
	writer := io.MultiWriter(temp, bar)

	_, err = io.Copy(writer, newVersion.Body)
	if err != nil {
		return err
	}

	temp.Chmod(0755)
	return os.Rename(temp.Name(), werckerPath)
}
func downloader(uri string, file string) {
	var log = logrus.New()
	log.Formatter = new(logrus.JSONFormatter)

	log.Debug("creating file.")
	outFile, err := os.Create(file)
	if err != nil {
		os.Exit(1)
	}

	client := &http.Client{}
	log.Debugf("new request for %+v", uri)
	req, err := http.NewRequest("GET", uri, nil)
	log.Debug("request initiated.")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}

	// progress bar
	header := resp.ContentLength
	bar := pb.New(int(header)).SetUnits(pb.U_BYTES)
	bar.Start()
	reader := bar.NewProxyReader(resp.Body)
	// not closing reader as the NewProxyReader exits for me.

	log.Debugf("copying response data to %+v", file)
	io.Copy(outFile, reader)
	os.Exit(0)
}
func runPgRestore(args *docopt.Args, client controller.Client, config *runConfig) error {
	config.Stdin = os.Stdin
	var size int64
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Open(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		stat, err := f.Stat()
		if err != nil {
			return err
		}
		size = stat.Size()
		config.Stdin = f
	}
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		bar := pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		if size > 0 {
			bar.Total = size
		} else {
			bar.ShowBar = false
		}
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
		config.Stdin = bar.NewProxyReader(config.Stdin)
	}
	return pgRestore(client, config)
}
func newPb(size int) (bar *pb.ProgressBar) {
	bar = pb.New(size)
	bar.SetRefreshRate(time.Millisecond)
	bar.ShowSpeed = true
	bar.Start()
	return
}
func share(srv *drive.Service, accountFrom string, accountTo string) error {
	// List all files and folders
	files, err := findAllFilesFrom(srv, accountFrom)
	if err != nil {
		return err
	}
	fmt.Printf("Found: %d files or directories\n", len(files))

	// Progress bar
	bar := pb.New(len(files))
	bar.SetRefreshRate(time.Second)
	bar.Start()

	for _, file := range files {
		bar.Increment()
		err := shareFile(srv, file, accountTo)
		if err != nil {
			return err
		}
	}
	bar.FinishPrint("Done.")

	// Everything is OK
	return nil
}
func imageDownloader(uri string, filename string, wg *sync.WaitGroup) {
	defer wg.Done()
	tokens := strings.Split(uri, "/")
	fileName := tokens[len(tokens)-1]
	outFile, err := os.Create(fileName)
	if err != nil {
		fmt.Println(err)
	}
	defer outFile.Close()

	client := &http.Client{}
	req, err := http.NewRequest("GET", uri, nil)
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println(err)
	}
	defer resp.Body.Close()

	header := resp.ContentLength
	bar := pb.New(int(header)).SetUnits(pb.U_BYTES)
	bar.SetRefreshRate(time.Millisecond)
	// bar.Start()
	rd := bar.NewProxyReader(resp.Body)

	// and copy from reader
	_, err = io.Copy(outFile, rd)
	if err != nil {
		fmt.Println(err)
	}
}
func runRedisDump(args *docopt.Args, client controller.Client, config *runConfig) error {
	config.Stdout = os.Stdout
	if filename := args.String["--file"]; filename != "" {
		f, err := os.Create(filename)
		if err != nil {
			return err
		}
		defer f.Close()
		config.Stdout = f
	}
	if !args.Bool["--quiet"] && term.IsTerminal(os.Stderr.Fd()) {
		bar := pb.New(0)
		bar.SetUnits(pb.U_BYTES)
		bar.ShowBar = false
		bar.ShowSpeed = true
		bar.Output = os.Stderr
		bar.Start()
		defer bar.Finish()
		config.Stdout = io.MultiWriter(config.Stdout, bar)
	}
	config.Args[0] = "/bin/dump-flynn-redis"
	return runJob(client, *config)
}
func newPb(size int) (bar *pb.ProgressBar) {
	bar = pb.New(size)
	bar.Current = barChar
	bar.BarStart = ""
	bar.BarEnd = ""
	bar.Start()
	return
}
// Start starts showing progress
func (t *TextProgress) Start(pkg string, total float64) {
	// TODO go to New64 once we update the pb package.
	t.pbar = pb.New(0)
	t.pbar.Total = int64(total)
	t.pbar.ShowSpeed = true
	t.pbar.Units = pb.U_BYTES
	t.pbar.Start()
}
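// A minimal sketch of the variant the TODO above refers to, assuming an updated
// pb package that provides New64 (cheggaaa/pb v1 exports func New64(int64) *ProgressBar).
// startNew64 is a hypothetical name used only to illustrate the swap; apart from
// the constructor call, the body mirrors Start above.
func (t *TextProgress) startNew64(pkg string, total float64) {
	t.pbar = pb.New64(int64(total)) // total set at construction instead of via the Total field
	t.pbar.ShowSpeed = true
	t.pbar.Units = pb.U_BYTES
	t.pbar.Start()
}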
func (c *MountCommand) cacheWithProgress(cacheReq req.Cache) (err error) {
	// doneErr is used to wait until the cache progress is done, and also send
	// any error encountered. We simply send nil if there is no error.
	doneErr := make(chan error)

	// The creation of the pb object presents a CLI progress bar to the user.
	bar := pb.New(100)
	bar.SetMaxWidth(100)
	bar.Start()

	// The callback, used to update the progress bar as remote.cache downloads
	cacheProgressCallback := func(par *dnode.Partial) {
		type Progress struct {
			Progress int        `json:"progress"`
			Error    kite.Error `json:"error"`
		}

		// TODO: Why is this an array from Klient? How can this be written cleaner?
		ps := []Progress{{}}
		par.MustUnmarshal(&ps)
		p := ps[0]

		if p.Error.Message != "" {
			doneErr <- p.Error
			c.Log.Error("remote.cacheFolder progress callback returned an error. err:%s", err)
			c.printfln(defaultHealthChecker.CheckAllFailureOrMessagef(
				FailedPrefetchFolder,
			))
		}

		bar.Set(p.Progress)

		// TODO: Disable the callback here, so that it's impossible to double call
		// the progress after completion - to avoid weird/bad UX and errors.
		if p.Progress == 100 {
			doneErr <- nil
		}
	}

	// c.callRemoteCache handles UX
	if err := c.callRemoteCache(cacheReq, cacheProgressCallback); err != nil {
		return err
	}

	if err := <-doneErr; err != nil {
		c.printfln("") // newline to ensure the progress bar ends
		c.printfln(
			defaultHealthChecker.CheckAllFailureOrMessagef(FailedPrefetchFolder),
		)
		return fmt.Errorf(
			"remote.cacheFolder progress callback returned an error. err:%s", err,
		)
	}

	bar.Finish()
	return nil
}
// SetupResumeLog sets up ConnectionLog for a resumed download
func SetupResumeLog(filename string, length, noOfConn int) error {
	connLog.stats = make([]ConnectionStat, noOfConn)
	barArray := make([]*pb.ProgressBar, noOfConn+1)
	totalbar := pb.New(length).Prefix("Total ")
	lenSub := length / noOfConn
	for i := 0; i < noOfConn; i++ {
		partFilename := "temp/" + filename + "_" + strconv.Itoa(i)
		if _, err := os.Stat(partFilename); err == nil {
			reader, err := ioutil.ReadFile(partFilename)
			if err != nil {
				return err
			}
			// The first 16 bytes of a part file hold the begin and end offsets
			// as two little-endian uint64 values; the rest is downloaded data.
			header := reader[:16]
			fileBegin := int(binary.LittleEndian.Uint64(header[0:8]))
			fileEnd := int(binary.LittleEndian.Uint64(header[8:16]))
			bar := pb.New(fileEnd - fileBegin).Prefix("Connection " + strconv.Itoa(i+1) + " ")
			// Advance the per-connection and total bars by the bytes already downloaded.
			for j := 0; j < len(reader)-16; j++ {
				bar.Increment()
				totalbar.Increment()
			}
			customizeBar(bar)
			connLog.stats[i] = ConnectionStat{connectionIndex: i, pbar: bar}
			barArray[i] = bar
		} else {
			fileBegin := lenSub * i
			fileEnd := lenSub * (i + 1)
			if i == noOfConn-1 {
				fileEnd = length
			}
			bar := pb.New(fileEnd - fileBegin).Prefix("Connection " + strconv.Itoa(i+1) + " ")
			customizeBar(bar)
			connLog.stats[i] = ConnectionStat{connectionIndex: i, pbar: bar}
			barArray[i] = bar
		}
	}
	customizeBar(totalbar)
	connLog.totalbar = totalbar
	barArray[noOfConn] = totalbar
	var err error
	connLog.pool, err = pb.StartPool(barArray...)
	if err != nil {
		return err
	}
	return nil
}
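// For reference, a minimal sketch of how a part-file header matching the resume
// logic above could be written when a connection starts. writePartHeader is a
// hypothetical helper (the actual writer is not shown here): 16 bytes, two
// little-endian uint64 values holding the begin and end offsets of the range.
func writePartHeader(w io.Writer, fileBegin, fileEnd int) error {
	header := make([]byte, 16)
	binary.LittleEndian.PutUint64(header[0:8], uint64(fileBegin))
	binary.LittleEndian.PutUint64(header[8:16], uint64(fileEnd))
	_, err := w.Write(header)
	return err
}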
func main() {
	runClean := flag.Bool("cleanup", false, "Cleanup merged branches")
	noUpdate := flag.Bool("no-update", false, "Skip updating repositories")
	flag.Parse()

	if len(flag.Arg(0)) == 0 {
		fmt.Println("ERROR: You have to specify the directory with repositories to update.")
		os.Exit(1)
	}

	errors := ""
	u := api.Updater{}
	u.DirHandler = &git.GitHandler{Dir: flag.Arg(0)}
	if err := u.DirHandler.Prepare(); err != nil {
		errors += fmt.Sprintf("Pre-update errors:\n%s", err)
	}

	bar := pb.New(len(u.DirHandler.Repositories()))
	bar.ShowTimeLeft = false
	bar.ShowSpeed = false
	bar.Start()
	completeFunc := func() error {
		bar.Increment()
		return nil
	}

	if !*noUpdate {
		fmt.Fprintf(os.Stdout, "Updating repositories ...\n")
		if err := u.DirHandler.Update(completeFunc); err != nil {
			errors += fmt.Sprintf("Post-update errors:\n%s", err)
		}
		bar.FinishPrint(u.DirHandler.Summary())
	}

	if *runClean {
		fmt.Fprintf(os.Stdout, "Cleaning up merged branches ...\n")
		report := ""
		for _, r := range u.DirHandler.Repositories() {
			out, err := git.CleanMergedBranches(r, completeFunc)
			if err != nil {
				errors += fmt.Sprintf("\n%s\n", err)
			} else {
				report += fmt.Sprintf("\n%s:%s\n", r.Name(), out)
			}
		}
		bar.FinishPrint(report)
	}

	if len(errors) > 0 {
		fmt.Fprintf(os.Stderr, "%s", errors)
		os.Exit(1)
	}
}
func rangeFunc(cmd *cobra.Command, args []string) {
	if len(args) == 0 || len(args) > 2 {
		fmt.Fprintln(os.Stderr, cmd.Usage())
		os.Exit(1)
	}

	k := args[0]
	end := ""
	if len(args) == 2 {
		end = args[1]
	}

	if rangeConsistency == "l" {
		fmt.Println("bench with linearizable range")
	} else if rangeConsistency == "s" {
		fmt.Println("bench with serializable range")
	} else {
		fmt.Fprintln(os.Stderr, cmd.Usage())
		os.Exit(1)
	}

	results = make(chan result)
	requests := make(chan v3.Op, totalClients)
	bar = pb.New(rangeTotal)

	clients := mustCreateClients(totalClients, totalConns)

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doRange(clients[i].KV, requests)
	}

	pdoneC := printReport(results)

	go func() {
		for i := 0; i < rangeTotal; i++ {
			opts := []v3.OpOption{v3.WithRange(end)}
			if rangeConsistency == "s" {
				opts = append(opts, v3.WithSerializable())
			}
			op := v3.OpGet(k, opts...)
			requests <- op
		}
		close(requests)
	}()

	wg.Wait()

	bar.Finish()

	close(results)
	<-pdoneC
}
func printStatus() {
	count := cap(Buffers[0])
	bar = pb.New(count)
	bar.SetRefreshRate(100 * time.Millisecond)
	bar.ShowCounters = true
	bar.ShowTimeLeft = false
	bar.Start()
}
func SetupProgressBar(size int, prefix string) *pb.ProgressBar {
	bar := pb.New(size)
	bar.SetUnits(pb.U_BYTES)
	bar.SetRefreshRate(time.Millisecond * 10)
	bar.ShowCounters = false
	bar.ShowFinalTime = false
	bar.SetWidth(100)
	bar.Prefix(prefix)
	return bar
}
// CopyFile copies the content of the src file to dst. It overwrites the dst file if ow is true.
func CopyFile(src, dst string, ow bool) (int64, error) {
	// Create source
	var source io.Reader
	s, err := os.Open(src)
	if err != nil {
		log.Error("Couldn't open", src, err)
		return 0, err
	}
	defer s.Close()

	// Stat source
	srcStat, err := s.Stat()
	if err != nil {
		log.Error("Couldn't stat", src, err)
		return 0, err
	}
	sourceSize := srcStat.Size()
	source = s

	// Check if dst exists
	d, err := os.Open(dst)
	if !os.IsNotExist(err) {
		if ow == false {
			return 0, err
		}
	}
	defer d.Close()

	// Create dest
	dest, err := os.Create(dst)
	if err != nil {
		log.Error("Couldn't create", dst, err)
		return 0, err
	}
	defer dest.Close()

	// Create the progress bar
	bar := pb.New(int(sourceSize)).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10).Prefix(truncate(path.Base(src), 10, true) + ": ")
	bar.Format("<.->")
	bar.ShowSpeed = true
	bar.Start()

	// Copy
	writer := io.MultiWriter(dest, bar)
	written, err := io.Copy(writer, source)
	if err != nil {
		log.Error("Couldn't copy", err)
		return 0, err
	}
	bar.Finish()

	return written, nil
}
func main() {
	opts, args := parseFlags()

	conv := cbconvert.NewConvertor(opts)

	var bar *pb.ProgressBar

	c := make(chan os.Signal, 3)
	signal.Notify(c, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM)
	go func() {
		for range c {
			fmt.Fprintf(os.Stderr, "Aborting\n")
			os.RemoveAll(conv.Workdir)
			os.Exit(1)
		}
	}()

	if _, err := os.Stat(opts.Outdir); err != nil {
		os.MkdirAll(opts.Outdir, 0777)
	}

	files := conv.GetFiles(args)

	if opts.Cover || opts.Thumbnail {
		if !opts.Quiet {
			bar = pb.New(conv.Nfiles)
			bar.ShowTimeLeft = false
			bar.Start()
		}
	}

	for _, file := range files {
		stat, err := os.Stat(file)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error Stat: %v\n", err.Error())
			continue
		}

		if opts.Cover {
			conv.ExtractCover(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		} else if opts.Thumbnail {
			conv.ExtractThumbnail(file, stat)
			if !opts.Quiet {
				bar.Increment()
			}
			continue
		}

		conv.ConvertComic(file, stat)
	}
}
// Create a default progress bar.
func (c *Client) newDefaultProgressBar(fileLength int) *pb.ProgressBar {
	bar := pb.New(fileLength)
	bar.ShowSpeed = true
	bar.ShowTimeLeft = true
	bar.ShowCounters = true
	bar.Units = pb.U_BYTES
	bar.SetRefreshRate(time.Second)
	bar.SetWidth(80)
	bar.SetMaxWidth(80)
	return bar
}
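// A minimal usage sketch for the helper above (downloadWithProgress is a
// hypothetical method, not part of the original Client): start the configured
// bar, wrap the source reader with NewProxyReader so reads advance the bar,
// and finish when the copy completes.
func (c *Client) downloadWithProgress(dst io.Writer, body io.Reader, fileLength int) error {
	bar := c.newDefaultProgressBar(fileLength)
	bar.Start()
	defer bar.Finish()
	_, err := io.Copy(dst, bar.NewProxyReader(body))
	return err
}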
// download a file with the HTTP/HTTPS protocol showing a progress bar. The destination file is
// always overwritten.
func download(rawurl string, destinationPath string) {
	tempDestinationPath := destinationPath + ".tmp"

	destination, err := os.Create(tempDestinationPath)
	if err != nil {
		log.Fatalf("Unable to open the destination file: %s", tempDestinationPath)
	}
	defer destination.Close()

	response, err := customGet(rawurl)
	if err != nil {
		log.Fatalf("Unable to open a connection to %s", rawurl)
	}
	defer response.Body.Close()

	if response.StatusCode != http.StatusOK {
		log.Fatalf("Unexpected HTTP response code. Wanted 200 but got %d", response.StatusCode)
	}

	var progressBar *pb.ProgressBar
	contentLength, err := strconv.Atoi(response.Header.Get("Content-Length"))
	if err == nil {
		progressBar = pb.New(contentLength)
	} else {
		progressBar = pb.New(0)
	}
	defer progressBar.Finish()

	progressBar.ShowSpeed = true
	progressBar.SetRefreshRate(time.Millisecond * 1000)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Start()

	writer := io.MultiWriter(destination, progressBar)
	io.Copy(writer, response.Body)

	destination.Close()
	os.Rename(tempDestinationPath, destinationPath)
}
// downloadR downloads an R installer from a specified URL
func downloadR(url string, rootPath string) string {
	// Parse URL and create filename from last element
	tokens := strings.Split(url, "/")
	fileName := tokens[len(tokens)-1]

	// Check if file has already been downloaded. If not, create the file at the specified path
	installerPath := createDownloadPath(rootPath, fileName)
	fmt.Println(installerPath)

	// Check to see if the installer has already been downloaded
	if _, err := os.Stat(installerPath); err == nil {
		fmt.Println(fileName, "already exists!")
		// return forward-slash installer path
		return filepath.ToSlash(installerPath)
	}

	output, err := os.Create(installerPath)
	errCheck(err)
	defer output.Close()

	// Start download process
	fmt.Println("Downloading", url, "to", fileName)

	// Download file from URL
	response, err := http.Get(url)
	errCheck(err)
	defer response.Body.Close()

	// Print http response status to console
	fmt.Println(response.Status)

	// Get the response size from the HTTP header for progress bar
	responseSize, _ := strconv.Atoi(response.Header.Get("Content-Length"))

	// Create progress bar
	bar := pb.New(int(responseSize)).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10)
	bar.ShowSpeed = true
	bar.SetWidth(120)
	bar.Start()

	// Create multi-writer for output destination and progress bar
	writer := io.MultiWriter(output, bar)

	// Copy to output
	_, err = io.Copy(writer, response.Body)
	errCheck(err)
	bar.Finish()

	fmt.Printf("%s with %v bytes downloaded\n", fileName, responseSize)

	// return forward-slash installer path
	return filepath.ToSlash(installerPath)
}
func uploadIndex(c *cli.Context, index client.Index) error {
	missing, err := rackClient(c).IndexMissing(index)
	if err != nil {
		return err
	}

	total := 0
	for _, m := range missing {
		total += index[m].Size
	}

	bar := pb.New(total)
	bar.Prefix("Uploading changes... ")
	bar.SetMaxWidth(40)
	bar.SetUnits(pb.U_BYTES)

	if total == 0 {
		fmt.Println("NONE")
	} else {
		bar.Start()
	}

	inch := make(chan string)
	errch := make(chan error)

	for i := 1; i < IndexOperationConcurrency; i++ {
		go uploadItems(c, index, bar, inch, errch)
	}

	go func() {
		for _, hash := range missing {
			inch <- hash
		}
	}()

	for range missing {
		if err := <-errch; err != nil {
			return err
		}
	}

	close(inch)

	if total > 0 {
		bar.Finish()
	}

	return nil
}
func putFunc(cmd *cobra.Command, args []string) {
	if keySpaceSize <= 0 {
		fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", keySpaceSize)
		os.Exit(1)
	}

	results = make(chan result)
	requests := make(chan v3.Op, totalClients)
	bar = pb.New(putTotal)

	k, v := make([]byte, keySize), string(mustRandBytes(valSize))

	clients := mustCreateClients(totalClients, totalConns)

	bar.Format("Bom !")
	bar.Start()

	for i := range clients {
		wg.Add(1)
		go doPut(context.Background(), clients[i], requests)
	}

	pdoneC := printReport(results)

	go func() {
		for i := 0; i < putTotal; i++ {
			if seqKeys {
				binary.PutVarint(k, int64(i%keySpaceSize))
			} else {
				binary.PutVarint(k, int64(rand.Intn(keySpaceSize)))
			}
			requests <- v3.OpPut(string(k), v)
		}
		close(requests)
	}()

	if compactInterval > 0 {
		go func() {
			for {
				time.Sleep(compactInterval)
				compactKV(clients)
			}
		}()
	}

	wg.Wait()

	bar.Finish()

	close(results)
	<-pdoneC
}
func indexAndSaveHits(ts *index.TokenSetSearcher, hits []HitInfo, idxs []int, saveFullHit func(*HitInfo) error) error {
	rank := 0
	var bar *pb.ProgressBar
	if terminal.IsTerminal(int(os.Stdout.Fd())) {
		bar = pb.New(len(idxs))
		bar.Start()
	}
	for i := range idxs {
		hit := &hits[idxs[i]]
		if i > 0 && hit.StaticScore < hits[idxs[i-1]].StaticScore {
			rank = i
		}
		hit.StaticRank = rank

		if err := saveFullHit(hit); err != nil {
			return err
		}

		var desc, readme string
		desc, hit.Description = hit.Description, ""
		readme, hit.ReadmeData = hit.ReadmeData, ""
		hit.Imported = nil
		hit.TestImported = nil

		var nameTokens stringsp.Set
		nameTokens = AppendTokens(nameTokens, []byte(hit.Name))

		var tokens stringsp.Set
		tokens.Add(nameTokens.Elements()...)
		tokens = AppendTokens(tokens, []byte(hit.Package))
		tokens = AppendTokens(tokens, []byte(desc))
		tokens = AppendTokens(tokens, []byte(readme))
		tokens = AppendTokens(tokens, []byte(hit.Author))
		for _, word := range hit.Exported {
			AppendTokens(tokens, []byte(word))
		}
		ts.AddDoc(map[string]stringsp.Set{
			IndexTextField: tokens,
			IndexNameField: nameTokens,
			IndexPkgField:  stringsp.NewSet(hit.Package),
		}, *hit)

		if bar != nil {
			bar.Increment()
		}
	}
	if bar != nil {
		bar.FinishPrint("Indexing finished!")
	}
	DumpMemStats()
	return nil
}
func (r *Restore) downloadRuns() {
	defer close(r.apply)

	for br := range r.download {
		log.Printf("Downloading %s", br.DownloadURL)

		// create a tempdir
		dir, err := ioutil.TempDir("", "backup")
		if err != nil {
			log.Fatal("Failed to create TempDir: ", err)
		}
		log.Printf("Writing to tempdir of %s", dir)
		br.ExtractedDir = dir

		// read our backuprun into the tempdir, via gzip | xbstream
		cmd := exec.Command("xbstream", "-x", "-C", dir)

		resp, err := http.Get(br.DownloadURL)
		if err != nil {
			log.Fatal("Failed to fetch backup run: ", err)
		}
		defer resp.Body.Close()

		progress := pb.New(int(resp.ContentLength)).SetUnits(pb.U_BYTES)
		progress.SetWidth(80)
		progress.Start()
		pbreader := progress.NewProxyReader(resp.Body)

		reader, err := gzip.NewReader(pbreader)
		if err != nil {
			log.Fatal("Failed to set up gzip reader: ", err)
		}

		cmd.Stdin = reader
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr

		err = cmd.Run()
		if err != nil {
			log.Fatal("xbstream failed: ", err)
		}

		r.apply <- br
	}
}
// CheckMetadata downloads the metadata about all of the files currently
// stored on Drive and compares it with the local cache.
func (gd *GDrive) CheckMetadata(filename string, report func(string)) error {
	idToFile, err := gd.getIdToFile(filename)
	if err != nil {
		return err
	}

	// This will almost certainly take a while, so put up a progress bar.
	var bar *pb.ProgressBar
	if !gd.quiet {
		bar = pb.New(len(idToFile))
		bar.ShowBar = true
		bar.ShowCounters = false
		bar.Output = os.Stderr
		bar.Prefix("Checking metadata cache: ")
		bar.Start()
	}

	err = gd.runQuery("trashed=false", func(f *drive.File) {
		if file, ok := idToFile[f.Id]; ok {
			df := newFile(f.Title, f)
			if !filesEqual(df, file) {
				report(fmt.Sprintf("%s: metadata mismatch.\nLocal: %+v\nDrive: %+v",
					file.Path, file, df))
			}
			if bar != nil {
				bar.Increment()
			}
			delete(idToFile, f.Id)
		} else {
			// It'd be preferable to have "sharedWithMe=false" included in
			// the query string above, but the combination of that with
			// "trashed=false" seems to lead to no results being returned.
			if f.Shared == false {
				report(fmt.Sprintf("%s: found on Drive, not in local cache [%+v]", f.Title, f))
			}
		}
	})
	if err != nil {
		return err
	}

	for _, f := range idToFile {
		report(fmt.Sprintf("%s: found in local cache, not on Drive [%+v]", f.Path, f))
	}

	if bar != nil {
		bar.Finish()
	}
	return nil
}
func sendFileToRemoteHost(client *ssh.Client, limit int64, sourceFile, targetUser, targetHost, targetFile string) {
	session, err := client.NewSession()
	if err != nil {
		log.Fatalln("Failed to create session: " + err.Error())
	}
	defer session.Close()

	go func() {
		iw, err := session.StdinPipe()
		if err != nil {
			log.Fatalln("Failed to create input pipe: " + err.Error())
		}
		w := flowrate.NewWriter(iw, limit)

		src, srcErr := os.Open(sourceFile)
		if srcErr != nil {
			log.Fatalln("Failed to open source file: " + srcErr.Error())
		}
		srcStat, statErr := src.Stat()
		if statErr != nil {
			log.Fatalln("Failed to stat file: " + statErr.Error())
		}

		fmt.Fprintln(w, "C0644", srcStat.Size(), filepath.Base(sourceFile))

		if srcStat.Size() > 0 {
			bar := pb.New(int(srcStat.Size()))
			bar.Units = pb.U_BYTES
			bar.ShowSpeed = true
			bar.Start()
			wp := io.MultiWriter(w, bar)
			fmt.Printf("Transferring %s to %s@%s:%s\n", sourceFile, targetUser, targetHost, targetFile)
			fmt.Printf("Speed limited to %d bytes/sec\n", limit)
			io.Copy(wp, src)
			bar.Finish()
			fmt.Fprint(w, "\x00")
			w.Close()
		} else {
			fmt.Printf("Transferred empty file %s to %s@%s:%s\n", sourceFile, targetUser, targetHost, targetFile)
			fmt.Fprint(w, "\x00")
			w.Close()
		}
	}()

	if err := session.Run(fmt.Sprintf("scp -t %s", targetFile)); err != nil {
		log.Fatalln("Failed to run: " + err.Error())
	}
}
// Converts directory to CBZ
func (c *Convertor) convertDirectory(path string) {
	c.Workdir, _ = ioutil.TempDir(os.TempDir(), "cbc")

	images := c.getImagesFromPath(path)
	c.Ncontents = len(images)
	c.CurrContent = 0

	if !c.Opts.Quiet {
		bar = pb.New(c.Ncontents)
		bar.ShowTimeLeft = false
		bar.Prefix(fmt.Sprintf("Converting %d of %d: ", c.CurrFile, c.Nfiles))
		bar.Start()
	}

	for index, img := range images {
		c.CurrContent++
		if !c.Opts.Quiet {
			bar.Increment()
		}

		f, err := os.Open(img)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error Open: %v\n", err.Error())
			continue
		}

		i, err := c.decodeImage(f, img)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Error Decode: %v\n", err.Error())
			continue
		}

		if !c.Opts.RGB && !c.isGrayScale(i) {
			i = c.TransformImage(i)
			c.encodeImage(i, filepath.Join(c.Workdir, filepath.Base(img)))
			continue
		}

		f.Close()

		if i != nil {
			throttle <- 1
			wg.Add(1)
			go c.convertImage(i, index, img)
		}
	}
	wg.Wait()
}
func (s *Server) uploadMigration(migrations []Migration) {
	src, err := os.OpenFile(tmpDir+"/migrations.tar.gz", os.O_RDONLY, 0644)
	if err != nil {
		exitf("failed to open %s/migrations.tar.gz: %s", tmpDir, err)
	}
	defer func() { src.Close() }()
	fi, err := src.Stat()
	if err != nil {
		exitf("failed to retrieve file info of %s: %s", src.Name(), err)
	}

	s.initSetUp()

	session := s.getSession()
	defer session.Close()

	go func() {
		dst, err := session.StdinPipe()
		if err != nil {
			exitf("failed to get StdinPipe: %s", err)
		}
		defer dst.Close()

		bar := pb.New(int(fi.Size())).SetUnits(pb.U_BYTES)
		bar.Start()
		defer bar.Finish()
		dstw := io.MultiWriter(bar, dst)

		_, err = fmt.Fprintln(dst, "C0644", fi.Size(), "migrations.tar.gz")
		if err != nil {
			exitf("failed to open migrations.tar.gz: %s", err)
		}
		_, err = io.Copy(dstw, src)
		if err != nil {
			exitf("failed to upload migrations.tar.gz: %s", err)
		}
		_, err = fmt.Fprint(dst, "\x00")
		if err != nil {
			exitf("failed to close migrations.tar.gz: %s", err)
		}
	}()

	if output, err := session.CombinedOutput("/usr/bin/scp -qrt harp/" + cfg.App.Name); err != nil {
		exitf("Failed to run: %s %s", string(output), err)
	}
}