func Example() {
	writer := uilive.New()
	writer.Start()

	for i := 0; i <= 100; i++ {
		fmt.Fprintf(writer, "Downloading.. (%d/%d) GB\n", i, 100)
		time.Sleep(time.Millisecond * 5)
	}

	fmt.Fprintln(writer, "Finished: Downloaded 100GB")
	time.Sleep(time.Millisecond)
}
// New returns a new progress bar with defaults
func New() *Progress {
	return &Progress{
		Width:           Width,
		Out:             Out,
		Bars:            make([]*Bar, 0),
		RefreshInterval: RefreshInterval,

		lw:       uilive.New(),
		stopChan: make(chan struct{}),
	}
}
func run(universe *data.Universe) int {
	writer := uilive.New()
	writer.Start()
	defer writer.Stop()

	for {
		drawScreen(writer, universe)
		universe = evolveUniverse(universe)
		time.Sleep(250 * time.Millisecond)
	}

	return 0
}
func Example() {
	writer := uilive.New()

	// start listening to updates and render
	writer.Start()

	for i := 0; i <= 100; i++ {
		fmt.Fprintf(writer, "Downloading.. (%d/%d) GB\n", i, 100)
		time.Sleep(time.Millisecond * 5)
	}

	fmt.Fprintln(writer, "Finished: Downloaded 100GB")
	writer.Stop() // flush and stop rendering
}
// Start starts rendering the progress bars. It listens for updates made with
// `bar.Set(n)` and for new bars added with `AddBar`.
func (p *Progress) Start() {
	lw := uilive.New()
	lw.Out = p.Out
	lw.RefreshInterval = p.RefreshInterval

	go func() {
		for {
			for _, bar := range p.Bars {
				fmt.Fprintln(lw, bar.String())
			}
			lw.Flush()
			lw.Wait()
		}
	}()
}
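// The Start method above is normally driven through a Progress value built by
// New (see the constructor earlier in this section). The sketch below is a
// minimal, hypothetical illustration only: AddBar and Set are assumed to have
// these signatures based on the doc comment above, not taken verbatim from the
// library.
func exampleProgressUsage() {
	p := New()           // progress container with library defaults
	bar := p.AddBar(100) // assumed: registers a bar with a total of 100 steps
	p.Start()            // kicks off the render goroutine shown above

	for i := 0; i <= 100; i++ {
		bar.Set(i) // assumed: updates the bar; the next refresh picks it up
		time.Sleep(time.Millisecond * 5)
	}
}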
func newStateUI(verbose bool) *stateUI {
	s := &stateUI{
		HeaderSink:  make(chan string),
		EventSink:   make(chan scheduler.Event),
		MessageSink: make(chan string),
		states:      make(map[string]string),
		writer:      uilive.New(),
		stopChan:    make(chan bool),
		verbose:     verbose,
		autoConfirm: false,
	}
	s.bypassWriter = s.writer.Bypass()
	//s.writer.Start()
	go s.processSinks()
	return s
}
func main() {
	writer := uilive.New()

	// start listening for updates and render
	writer.Start()

	for _, f := range []string{"Foo.zip", "Bar.iso"} {
		for i := 0; i <= 50; i++ {
			fmt.Fprintf(writer, "Downloading %s.. (%d/%d) GB\n", f, i, 50)
			time.Sleep(time.Millisecond * 25)
		}
		fmt.Fprintf(writer.Bypass(), "Downloaded %s\n", f)
	}

	fmt.Fprintln(writer, "Finished: Downloaded 100GB")
	writer.Stop() // flush and stop rendering
}
func main() {
	// USR1 switches to passthrough mode, behaving just like tail -f; counts and rates continue to update in this mode
	var passthroughChan = make(chan os.Signal, 1)
	signal.Notify(passthroughChan, syscall.SIGUSR1)

	// USR2 resets the counters
	var resetChan = make(chan os.Signal, 1)
	signal.Notify(resetChan, syscall.SIGUSR2)

	patternArgs := os.Args[1:]
	logCounters := []*LogCounter{}
	for _, patternArg := range patternArgs {
		r, err := regexp.Compile(patternArg)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		logCounters = append(logCounters, NewLogCounter(r))
	}

	// If no patterns are given, count each distinct line
	var useDistinct = false
	if len(logCounters) == 0 {
		useDistinct = true
	}

	repaintInterval, err := time.ParseDuration(config["repaint_interval"])
	if err != nil {
		repaintInterval = 1 * time.Second
	}
	repaintTicker := time.NewTicker(repaintInterval)

	writer := uilive.New()

	// reader := bufio.NewReader(os.Stdin)
	scanner := bufio.NewScanner(os.Stdin)
	lineChan := make(chan []byte)
	go func(scanner *bufio.Scanner, lineChan chan []byte) {
		for scanner.Scan() {
			var scannerBytes = scanner.Bytes()
			var line = make([]byte, len(scannerBytes))
			copy(line, scannerBytes)
			lineChan <- line
		}
		close(lineChan)
	}(scanner, lineChan)

	writer.Start()
	defer writer.Stop()

	var totalLinesRead = 0
	var passthrough = false
	defer repaint(writer, logCounters, totalLinesRead)

	for {
		select {
		case <-resetChan:
			for _, cnt := range logCounters {
				cnt.Reset()
				totalLinesRead = 0
				repaint(writer, logCounters, totalLinesRead)
			}
		case <-passthroughChan:
			passthrough = !passthrough
		case <-repaintTicker.C:
			if !passthrough {
				repaint(writer, logCounters, totalLinesRead)
			}
		case line, ok := <-lineChan:
			if !ok {
				return
			}
			if passthrough {
				log.Println(string(line))
			}
			if useDistinct {
				p := fmt.Sprintf("^%s$", regexp.QuoteMeta(string(line)))
				var patternExists = false
				for _, c := range logCounters {
					if c.p.String() == p {
						patternExists = true
						break
					}
				}
				if !patternExists {
					r := regexp.MustCompile(p)
					c := NewLogCounter(r)
					logCounters = append(logCounters, c)
				}
			}
			for _, c := range logCounters {
				indexMatches := c.p.FindAllIndex(line, -1)
				if indexMatches != nil {
					c.Incr(int64(len(indexMatches)))
				}
			}
			totalLinesRead++
		}
	}
}
func (opts *serviceCheckReleaseOpts) RunE(cmd *cobra.Command, args []string) error {
	if len(args) != 0 {
		return errorWantedNoArgs
	}

	if opts.releaseID == "" {
		return fmt.Errorf("-r, --release-id is required")
	}

	if opts.noFollow {
		job, err := opts.API.GetRelease(noInstanceID, jobs.JobID(opts.releaseID))
		if err != nil {
			return err
		}
		buf, err := json.MarshalIndent(job, "", " ")
		if err != nil {
			return err
		}
		_, err = os.Stdout.Write(buf)
		return err
	}

	var (
		w    io.Writer = os.Stdout
		stop           = func() {}
	)
	if !opts.noTty && isatty.IsTerminal(os.Stdout.Fd()) {
		liveWriter := uilive.New()
		liveWriter.Start()
		var stopOnce sync.Once
		w, stop = liveWriter, func() { stopOnce.Do(liveWriter.Stop) }
	}

	var (
		job                   jobs.Job
		err                   error
		prevStatus            string
		lastHeartbeatDatabase time.Time
		lastHeartbeatLocal    = time.Now()

		retryCount    = 0
		lastSucceeded = time.Now()
	)
	for range time.Tick(time.Second) {
		if retryCount > 0 {
			fmt.Fprintf(w, "Last status (%s): %s\n", lastSucceeded.Format(time.Kitchen), prevStatus)
			fmt.Fprintf(w, "Service unavailable. Retrying (#%d) ...\n", retryCount)
		}

		job, err = opts.API.GetRelease(noInstanceID, jobs.JobID(opts.releaseID))
		if err != nil {
			if err, ok := errors.Cause(err).(*transport.APIError); ok && err.IsUnavailable() {
				if time.Since(lastSucceeded) > retryTimeout {
					stop()
					fmt.Fprintln(os.Stdout, "Giving up; you can try again with")
					fmt.Fprintf(os.Stdout, " fluxctl check-release -r %s\n", opts.releaseID)
					fmt.Fprintln(os.Stdout)
					break
				}
				retryCount++
				continue
			}
			fmt.Fprintf(w, "Status: error querying release.\n")
			// error will get printed below
			break
		}
		lastSucceeded = time.Now()
		retryCount = 0

		status := "Waiting for job to be claimed..."
		if job.Status != "" {
			status = job.Status
		}

		// Checking heartbeat is a bit tricky. We get a timestamp in database
		// time, which may be radically different to our time. I've chosen to
		// check liveness by marking local time whenever the heartbeat time
		// changes. Going long enough without updating that local timestamp
		// triggers the warning.
		if !job.Claimed.IsZero() {
			if job.Heartbeat != lastHeartbeatDatabase {
				lastHeartbeatDatabase = job.Heartbeat
				lastHeartbeatLocal = time.Now()
			}
			if delta := time.Since(lastHeartbeatLocal); delta > largestHeartbeatDelta {
				status += fmt.Sprintf(" -- no heartbeat in %s; worker may have crashed", delta)
			}
		}

		if status != prevStatus {
			fmt.Fprintf(w, "Status: %s\n", status)
		}
		prevStatus = status

		if job.Done {
			break
		}
	}
	stop()

	if err != nil {
		return err
	}

	spec := job.Params.(jobs.ReleaseJobParams)
	fmt.Fprintf(os.Stdout, "\n")
	if !job.Success {
		fmt.Fprintf(os.Stdout, "Here's as far as we got:\n")
	} else if spec.Kind == flux.ReleaseKindPlan {
		fmt.Fprintf(os.Stdout, "Here's the plan:\n")
	} else {
		fmt.Fprintf(os.Stdout, "Here's what happened:\n")
	}
	for i, msg := range job.Log {
		fmt.Fprintf(os.Stdout, " %d) %s\n", i+1, msg)
	}

	if spec.Kind == flux.ReleaseKindExecute {
		fmt.Fprintf(os.Stdout, "Took %s\n", job.Finished.Sub(job.Submitted))
	}
	return nil
}