func upload(c cli.Command) {
	var path string

	switch len(c.Args()) {
	case 1:
		path = c.Arg(0).String()
	case 2:
		container = c.Arg(0).String()
		path = c.Arg(1).String()
	}

	if blank(container) || blank(path) {
		log.Fatal(errorNotEnough)
	}

	f, err := os.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	// Close the source file once the upload finishes.
	defer f.Close()

	stat, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}

	ext := filepath.Ext(path)
	mimetype := mime.TypeByExtension(ext)

	bar := pb.New64(stat.Size()).SetUnits(pb.U_BYTES)
	bar.Start()

	reader := io.TeeReader(f, bar)

	if err := api.Container(container).Upload(reader, stat.Name(), mimetype); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("uploaded to %s\n", container)
}
func initUnknownBar() {
	bar = pb.New64(0).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10)
	bar.ShowSpeed = true
	bar.ShowCounters = true
	bar.ShowBar = false
	bar.Start()
}
func NewCopy(albumID, title, url string) (*Copy, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}

	if resp.StatusCode != 200 {
		resp.Body.Close()
		return nil, fmt.Errorf("GET %s -> %s (Invalid status code)", url, resp.Status)
	}

	contentType := resp.Header.Get("content-type")
	contentSize := resp.ContentLength

	if !strings.HasPrefix(contentType, "image/") {
		resp.Body.Close()
		return nil, fmt.Errorf("GET %s -> %s (Invalid content type)", url, contentType)
	}

	progressBar := pb.New64(contentSize)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Prefix(title)

	return &Copy{
		AlbumID:       albumID,
		Title:         title,
		URL:           url,
		ContentType:   contentType,
		ContentLength: contentSize,
		client:        NewClient(),
		response:      resp,
		reader:        progressBar.NewProxyReader(bufio.NewReaderSize(resp.Body, 4096*10)),
		progressBar:   progressBar,
	}, nil
}
func NewUploader(albumID, title, path string) (*Uploader, error) {
	contentType, err := getContentType(path)
	if err != nil {
		return nil, err
	}

	contentSize, err := getContentSize(path)
	if err != nil {
		return nil, err
	}

	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}

	progressBar := pb.New64(contentSize)
	progressBar.SetUnits(pb.U_BYTES)
	progressBar.Prefix(title)

	return &Uploader{
		AlbumID:       albumID,
		Title:         title,
		FilePath:      path,
		ContentType:   contentType,
		ContentLength: contentSize,
		client:        NewClient(),
		file:          file,
		reader:        progressBar.NewProxyReader(bufio.NewReaderSize(file, 4096*10)),
		progressBar:   progressBar,
	}, nil
}
func TestThatProgressBarCanRewindProgress(t *testing.T) {
	nBytesToDownload := int64(1 << 8)
	progressBar := pb.New64(nBytesToDownload).SetUnits(pb.U_BYTES)
	progressBar.Output = new(DevNullWriter)
	progressBar.Start()

	reader := bytes.NewReader(make([]byte, nBytesToDownload))
	dst := make([]byte, 1<<7)
	bcr := &byteCountingReader{
		R: reader,
	}

	read, _ := bcr.Read(dst)
	if 1<<7 != read {
		t.Fatalf("Expected to read %d bytes but read %d byte[s]", 1<<7, read)
	}

	// Pretend a failure happened, rewind progress
	progressBar.Add64(int64(0 - bcr.bytesRead))

	// reset variables
	reader = bytes.NewReader(make([]byte, nBytesToDownload))
	bcr = &byteCountingReader{
		R: reader,
	}

	read, _ = bcr.Read(dst)
	read, _ = bcr.Read(dst)
	if len(dst) != read || nBytesToDownload != int64(bcr.bytesRead) {
		t.Fatalf("Expected to read %d bytes but read %d byte[s] and "+
			"to accumulate %d bytes but accumulated %d byte[s]",
			len(dst), read, nBytesToDownload, bcr.bytesRead)
	}
}
// Start starts showing progress
func (t *TextProgress) Start(label string, total float64) {
	t.pbar = pb.New64(int64(total))
	t.pbar.ShowSpeed = true
	t.pbar.Units = pb.U_BYTES
	t.pbar.Prefix(label)
	t.pbar.Start()
}
func main() {
	stdout := os.Stdout
	os.Stdout = os.Stderr

	executable, err := exec.LookPath("dd")
	if err != nil {
		fmt.Printf("ddp: failed to find dd: %s\n", err)
		os.Exit(1)
	}

	// Create pipe attached to a reader:
	output, input, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// Setup process with _the_ three file descriptors:
	files := []*os.File{
		os.Stdin,
		stdout,
		input,
	}

	process, err := os.StartProcess(executable, os.Args, &os.ProcAttr{
		Files: files,
	})
	if err != nil {
		fmt.Printf("ddp: failed to start dd: %s\n", err)
		os.Exit(1)
	}

	Trap(process)

	target := GuessTargetSize(os.Args)

	bar := pb.New64(target)
	bar.SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.Output = os.Stderr

	started := false
	OutputScanner(io.Reader(output), os.Stderr, func(bytes int64) {
		if !started {
			started = true
			bar.Start()
		}
		bar.Set64(bytes)
	})

	Interrupter(process, pb.DEFAULT_REFRESH_RATE)

	state, err := process.Wait()
	if err != nil {
		panic(err)
	}

	if started && state.Success() {
		bar.Finish()
	}

	output.Close()

	if !state.Success() {
		os.Exit(1)
	}
}
func printBytes(bytesRead chan int, total int64, done chan bool) {
	var r int
	var n int64
	var ok bool

	bar := pb.New64(total)
	bar.ShowSpeed = true
	bar.ShowFinalTime = true
	bar.SetUnits(pb.U_BYTES)
	bar.Start()

	for {
		r, ok = <-bytesRead
		n += int64(r)
		bar.Set64(n)
		if !ok {
			break
		}
	}

	bar.Finish()
	done <- true
}
func acquireProgressBar(t time.Duration) (*pb.ProgressBar, <-chan time.Time) {
	pb := pb.New64(int64(t.Seconds()))
	pb.ShowCounters = false
	pb.ShowPercent = false
	pb.Start()
	return pb, time.Tick(time.Second)
}
func newBar(count int64) *pb.ProgressBar {
	bar := pb.New64(count)
	bar.ShowTimeLeft = true
	bar.ShowSpeed = true
	return bar
}
func main() {
	if len(os.Args) != 3 {
		logrus.Fatalln("Usage: ddgo <src> <dest>")
	}
	src := os.Args[1]
	dst := os.Args[2]

	fd, err := os.OpenFile(src, os.O_RDONLY, 0)
	if err != nil {
		logrus.Fatalln("failed to open src:", err)
	}
	defer fd.Close()

	srcLen, err := fd.Seek(0, 2)
	if err != nil {
		logrus.Fatalln(err)
	}
	_, err = fd.Seek(0, 0)
	if err != nil {
		logrus.Fatalln(err)
	}

	bar := pb.New64(srcLen)
	bar.SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.ShowTimeLeft = true

	out, err := os.OpenFile(dst, os.O_WRONLY|syscall.O_DIRECT, 0777)
	if err != nil {
		logrus.Fatalln("failed to open dst:", err)
	}
	defer out.Close()

	dstLen, err := out.Seek(0, 2)
	if err != nil {
		logrus.Fatalln(err)
	}
	_, err = out.Seek(0, 0)
	if err != nil {
		logrus.Fatalln(err)
	}

	if dstLen < srcLen {
		bar.Total = dstLen
	}
	bar.Start()

	if dstLen < srcLen {
		logrus.Warnln("destination device too small, not all bytes will be copied")
		_, err = io.Copy(io.MultiWriter(bar, out), io.LimitReader(fd, dstLen))
	} else {
		_, err = io.Copy(io.MultiWriter(bar, out), fd)
	}
	if err != nil {
		logrus.Fatalln("copy failed:", err)
	}
	bar.Finish()

	fd.Close()
	out.Close()
}
func CreateFileProgress(f *os.File) (*pb.ProgressBar, error) {
	if fi, err := f.Stat(); err == nil {
		bar := pb.New64(fi.Size())
		bar.SetUnits(pb.U_BYTES)
		return bar, nil
	} else {
		return nil, err
	}
}
func (client *s3client) newProgressBar(total int64) *pb.ProgressBar {
	progress := pb.New64(total)
	progress.Output = client.progressOutput
	progress.ShowSpeed = true
	progress.Units = pb.U_BYTES
	progress.NotPrint = true
	return progress.SetWidth(80)
}
func getProgressBar(nBytes int64) *pb.ProgressBar {
	if quiet {
		return nil
	}

	progressBar := pb.New64(nBytes).SetUnits(pb.U_BYTES)
	progressBar.ShowBar = true
	progressBar.Output = os.Stderr
	progressBar.Start()
	return progressBar
}
func (pm *ProgressMeter) Start(total int64) {
	pm.bar = pb.New64(total)
	pm.bar.Prefix(pm.prefix)
	pm.bar.SetMaxWidth(70)
	pm.bar.SetUnits(pb.U_BYTES)
	pm.bar.SetRefreshRate(200 * time.Millisecond)
	pm.bar.Output = pm.out
	pm.bar.Start()
	pm.total = total
}
func ShowProgressBar(totalBytes int64) *pb.ProgressBar {
	log.Debugf("Creating progress bar for %d bytes", totalBytes)

	bar := pb.New64(totalBytes)
	bar.SetMaxWidth(70)
	bar.ShowCounters = false
	bar.ShowSpeed = true
	bar.SetUnits(pb.U_BYTES)
	bar.Format("[##-]")
	bar.Prefix(" ")
	bar.Start()
	return bar
}
// NewProgressBar initializes a new progress bar sized from the file's size
func NewProgressBar(file *os.File) *pb.ProgressBar {
	fi, err := file.Stat()
	total := int64(0)
	if err == nil {
		total = fi.Size()
	}

	bar := pb.New64(total)
	bar.SetUnits(pb.U_BYTES)
	return bar
}
func initBar(f *os.File) {
	fi, err := f.Stat()
	if err != nil {
		fmt.Println("Could not stat", f.Name())
		os.Exit(1)
	}

	bar = pb.New64(fi.Size()).SetUnits(pb.U_BYTES).SetRefreshRate(time.Millisecond * 10)
	bar.ShowPercent = true
	bar.ShowSpeed = true
	bar.ShowTimeLeft = true
	bar.Start()
}
func (m *SymlinkWebotsManager) uncompressFromHttp(v WebotsVersion, addr string) error {
	dest := path.Join(m.workpath, v.String())
	err := os.RemoveAll(dest)
	if err != nil {
		return err
	}
	err = os.MkdirAll(dest, 0775|os.ModeSetgid)
	if err != nil {
		return err
	}

	resp, err := http.Get(addr)
	if err != nil {
		return err
	}
	// Close the response body once extraction is done.
	defer resp.Body.Close()

	var netReader io.Reader = resp.Body
	if resp.ContentLength >= 0 {
		// adds a progress bar
		progress := make(chan int64)
		go func() {
			bar := pb.New64(resp.ContentLength)
			bar.Format("[=>_]")
			bar.Start()
			for n := range progress {
				bar.Add64(n)
			}
			bar.FinishPrint(fmt.Sprintf("Downloaded and extracted %s", addr))
		}()
		netReader = &ProgressReader{
			reader:   resp.Body,
			progress: progress,
		}
		defer close(progress)
	}

	tarReader := tar.NewReader(bzip2.NewReader(netReader))
	for {
		fileHeader, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		err = m.extractFile(v, fileHeader, tarReader)
		if err != nil {
			return fmt.Errorf("Cannot extract %s: %s", fileHeader.Name, err)
		}
	}

	return nil
}
// newProgressBar - instantiate a progress bar.
func newProgressBar(total int64) *progressBar {
	// Progress bar specific theme customization.
	console.SetColor("Bar", color.New(color.FgGreen, color.Bold))

	pgbar := progressBar{}

	// Get the new original progress bar.
	bar := pb.New64(total)

	// Set new human friendly print units.
	bar.SetUnits(pb.U_BYTES)

	// Refresh rate for progress bar is set to 125 milliseconds.
	bar.SetRefreshRate(time.Millisecond * 125)

	// Do not print a newline by default; it is handled manually.
	bar.NotPrint = true

	// Show current speed.
	bar.ShowSpeed = true

	// Custom callback with colorized bar.
	bar.Callback = func(s string) {
		console.Print(console.Colorize("Bar", "\r"+s))
	}

	// Use different unicodes for Linux, OS X and Windows.
	switch runtime.GOOS {
	case "linux":
		// Need to add '\x00' as delimiter for unicode characters.
		bar.Format("┃\x00▓\x00█\x00░\x00┃")
	case "darwin":
		// Need to add '\x00' as delimiter for unicode characters.
		bar.Format(" \x00▓\x00 \x00░\x00 ")
	default:
		// Default to non unicode characters.
		bar.Format("[=> ]")
	}

	// Start the progress bar.
	if bar.Total > 0 {
		bar.Start()
	}

	// Copy for future use.
	pgbar.ProgressBar = bar

	// Return the new progress bar.
	return &pgbar
}
func acceptIncomingFileTransfer(peerAddress net.IP, offering transmitFileRequest) {
	remoteAddressHostPort := peerAddress.String() + ":13160"
	connection, err := net.Dial("tcp4", remoteAddressHostPort)
	if err != nil {
		log.Fatal(err)
	}
	defer connection.Close()
	fmt.Println("Connection established")

	if *outputDirectory != "" {
		filePath = *outputDirectory + "/"
	}
	if *outputFileName != "" {
		filePath += *outputFileName
	} else {
		filePath += offering.FileName
	}
	fmt.Printf("Saving %s from %s to %s\n", offering.FileName, peerAddress.String(), filePath)

	file, err := os.Create(filePath)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	progressBar := pb.New64(offering.Size).SetUnits(pb.U_BYTES)
	progressBar.Prefix(offering.FileName + ": ")
	progressBar.Start()

	// Create the writer proxy
	writerProxy := io.MultiWriter(file, progressBar)

	written, err := io.CopyN(writerProxy, connection, offering.Size)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Received '%s' (Size: %s) from %s\n", offering.FileName, humanize.Bytes(uint64(written)), connection.RemoteAddr().String())

	if !compareHash(offering.Hash, filePath) {
		log.Fatal("Hash mismatch: The file was not transferred correctly!")
	}
}
func doUpload(ui packer.Ui, url string, file string) error {
	data, err := os.Open(file)
	if err != nil {
		return err
	}
	defer data.Close()

	fileInfo, err := data.Stat()
	if err != nil {
		return err
	}

	bar := pb.New64(fileInfo.Size()).SetUnits(pb.U_BYTES)
	bar.ShowSpeed = true
	bar.Callback = ui.Message
	bar.RefreshRate = time.Second * 5
	bar.SetWidth(40)
	reader := bar.NewProxyReader(data)

	req, err := http.NewRequest("PUT", url, reader)
	if err != nil {
		return err
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	req.ContentLength = fileInfo.Size()

	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}
	client := &http.Client{Transport: tr}

	bar.Start()
	res, err := client.Do(req)
	bar.Finish()
	if err != nil {
		return err
	}
	defer res.Body.Close()
	return nil
}
// Fetch fetches target from path specified in opts
func Fetch(opts *Fetcher, target string, showProgress bool) error {
	targetPath := filepath.Dir(opts.Destination)
	writable, err := targetPathWritable(targetPath)
	if !writable || err != nil {
		fmt.Printf("Cannot write to target `%s`. Please check that it exists and is writable.\n", targetPath)
		return err
	}

	temp, err := ioutil.TempFile(targetPath, fmt.Sprintf(".%s-", opts.Project))
	if err != nil {
		return err
	}
	defer temp.Close()

	bar := pb.New64(*targetSize(opts, target)).SetUnits(pb.U_BYTES)
	if showProgress {
		bar.Start()
	}

	etag := readMD5Sum(opts.Destination)
	writer := &ProgressWriter{temp, bar}
	downloader := s3manager.NewDownloader(&s3manager.DownloadOptions{
		S3: opts.S3,
	})

	_, err = downloader.Download(writer, &s3.GetObjectInput{
		Bucket:      aws.String(opts.Bucket),
		Key:         opts.Key(target),
		IfNoneMatch: aws.String(etag),
	})
	if err != nil {
		os.Remove(temp.Name())
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			if reqErr.StatusCode() == 304 {
				bar.Set64(bar.Total)
				bar.FinishPrint("Using local copy.")
				return nil
			}
			return reqErr
		}
		return err
	}

	return os.Rename(temp.Name(), opts.Destination)
}
func listenToStartFileTransfer(fileTransmittedChannel chan bool) {
	listener, err := net.Listen("tcp4", ":13160")
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()

	for {
		connection, err := listener.Accept()
		if err != nil {
			log.Fatal(err)
		}

		file, err := os.Open(*fileName)
		if err != nil {
			log.Fatal(err)
		}

		// Get a stat of the file and instantiate the progress bar
		fileStat, _ := file.Stat()
		progressBar := pb.New64(fileStat.Size()).SetUnits(pb.U_BYTES)
		progressBar.Prefix(*fileName + ": ")
		progressBar.Start()

		fmt.Printf("Writing %s to %s\n", *fileName, connection.RemoteAddr().String())

		// Create a proxy for the file reader
		fileReaderProxy := progressBar.NewProxyReader(file)

		written, err := io.Copy(connection, fileReaderProxy)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Written %s to %s\n", humanize.Bytes(uint64(written)), connection.RemoteAddr().String())

		// Close explicitly inside the loop; a deferred Close would never run here.
		file.Close()
		connection.Close()

		// Send true to the channel
		fileTransmittedChannel <- true
	}
}
func TestThatProgressBarsExpectedTotalCanChange(t *testing.T) {
	nBytesToDownload := int64(1 << 8)
	progressBar := pb.New64(nBytesToDownload).SetUnits(pb.U_BYTES)
	progressBar.Output = new(DevNullWriter)
	progressBar.Start()

	progressBar.Add64(int64(1 << 7))
	if 1<<8 != progressBar.Total {
		t.Fatalf("Expected the progress bar's total to be %bb but was %bb", 1<<8, progressBar.Total)
	}

	progressBar.Total = int64(1 << 9)
	if 1<<8 != progressBar.Total-progressBar.Add64(1<<7) {
		t.Fatalf("Expected the progress bar's current progress to be 0b%b but was 0b%b", 1<<8, progressBar.Add(0))
	}

	progressBar.Finish()
}
func dd(srcPath, dstPath string, bs int) error {
	if bs == 0 {
		bs = defaultBufSize
	}

	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	if err := sanityCheckDst(dstPath); err != nil {
		return err
	}

	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer func() {
		dst.Sync()
		dst.Close()
	}()

	// huge default bufsize
	w := NewFixedBuffer(dst, bs)

	stat, err := src.Stat()
	if err != nil {
		return err
	}

	pbar := pb.New64(stat.Size()).SetUnits(pb.U_BYTES)
	pbar.Start()

	mw := io.MultiWriter(w, pbar)
	_, err = io.Copy(mw, src)
	return err
}
func main() {
	// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
	// my-objectname are dummy values, please replace them with original values.

	// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
	// This boolean value is the last argument for New().

	// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
	// determined based on the Endpoint value.
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	defer reader.Close()

	objectInfo, err := reader.Stat()
	if err != nil {
		log.Fatalln(err)
	}

	// progress reader is notified as PutObject makes progress with
	// the read. For partial resume put object, progress reader is
	// appropriately advanced.
	progress := pb.New64(objectInfo.Size)
	progress.Start()

	n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, "application/octet-stream", progress)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}
func (r *Repo) DownloadFile(name string) error {
	compressed := strings.HasSuffix(name, ".gz")

	output, err := os.Create(filepath.Join(r.Path, strings.TrimSuffix(name, ".gz")))
	if err != nil {
		return err
	}
	defer output.Close()

	fmt.Printf("Downloading %s...\n", name)

	tr := &http.Transport{
		DisableCompression: true,
		Proxy:              http.ProxyFromEnvironment,
	}
	client := &http.Client{Transport: tr}
	resp, err := client.Get(bucket_url + name)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	bar := pb.New64(resp.ContentLength).SetUnits(pb.U_BYTES)
	bar.Start()
	proxyReader := bar.NewProxyReader(resp.Body)

	var reader io.Reader = proxyReader
	if compressed {
		gzipReader, err := gzip.NewReader(proxyReader)
		if err != nil {
			return err
		}
		reader = gzipReader
	}

	_, err = io.Copy(output, reader)
	bar.Finish()
	if err != nil {
		return err
	}
	return nil
}
func newProgressBar(total int64) *pb.ProgressBar {
	pbf := pb.New64(total)
	pbf.Start()
	return pbf
}
// DoDuplicate removes duplicate lines across filesList, writing the unique lines to newFile
func DoDuplicate(filesList []string, newFile string) error {
	m := map[uint64]bool{}
	readCount := 0
	added := 0

	out, err := os.Create(newFile)
	if err != nil {
		return err
	}
	defer out.Close()
	writer := bufio.NewWriter(out)

	for _, srcFile := range filesList {
		total, err := s.CalculateLines(srcFile)
		if err != nil {
			return err
		}

		in, err := os.Open(srcFile)
		if err != nil {
			return err
		}
		defer in.Close()
		scanner := bufio.NewScanner(in)

		// Progress Bar
		bar := pb.New64(total)
		bar.ShowPercent = true
		bar.ShowBar = true
		bar.ShowCounters = true
		bar.ShowTimeLeft = true
		//bar.SetRefreshRate(time.Millisecond * 100)
		//bar.Format("<.- >")
		bar.Start()

		for scanner.Scan() {
			line := scanner.Text()
			lineHash := s.GetHashFvn64(line)
			readCount++
			if _, seen := m[lineHash]; !seen {
				fmt.Fprintln(writer, line)
				m[lineHash] = true
				added++
			}
			bar.Increment()
		}
		bar.Finish()

		if err := scanner.Err(); err != nil {
			return err
		}
	}

	if err := writer.Flush(); err != nil {
		return err
	}

	fmt.Println("\nProcessed files:")
	fmt.Println("-------------------------------------------")
	for _, srcFile := range filesList {
		fmt.Println(srcFile)
	}
	fmt.Println("-------------------------------------------")
	fmt.Printf("|%-20s|%20d|\n", "Read", readCount)
	fmt.Printf("|%-20s|%20d|\n", "Removed", readCount-added)
	fmt.Printf("|%-20s|%20d|\n", "Result", added)
	fmt.Println("-------------------------------------------")
	fmt.Println()

	return nil
}