// maxRows returns the maximum number of metrics/rows we can display.
// This is the number of rows in the terminal minus 2 (for the time/stats line
// plus the grid header). To eliminate any weird cases where the terminal is
// very short, we never return fewer than 3 rows.
func maxRows() uint {
	n := goterm.Height() - 2
	if n < 3 {
		n = 3
	}
	return uint(n)
}
// PrintImage scales an image to the terminal size and renders it as a
// grayscale block picture using the 256-color ANSI palette.
func PrintImage(img image.Image) {
	width := goterm.Width()
	height := goterm.Height()
	img = resize.Resize(uint(width), uint(height), img, resize.NearestNeighbor)
	buf := ""
	for y := 0; y < height; y++ {
		buf += "\n"
		for x := 0; x < width; x++ {
			r, g, b, _ := img.At(x, y).RGBA()
			grayColor := color.Gray16{Y: uint16((r + g + b) / 3)}
			// Map the 16-bit gray value onto the 24-step grayscale ramp
			// of the 256-color palette (colors 232-255).
			pixelColor := 232 + uint32(grayColor.Y)*24/65536
			buf += fmt.Sprintf("\033[38;5;%dm█\033[0m", pixelColor)
		}
	}
	goterm.Print(buf)
	goterm.Flush()
}
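// Usage sketch (assumption, not part of the repository): a hypothetical
// showLogo helper that decodes an image file and hands it to PrintImage. It
// assumes it lives in the same package as PrintImage and maxRows, and that the
// "image", "image/jpeg" (blank import, to register the decoder), "os" and
// "fmt" packages are imported.
func showLogo(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// image.Decode picks whichever decoder has been registered via a blank
	// import (image/jpeg here; use image/png for PNG assets).
	img, _, err := image.Decode(f)
	if err != nil {
		return err
	}

	PrintImage(img) // fills the terminal with a grayscale rendering of the image
	fmt.Printf("up to %d metric rows fit below the header\n", maxRows())
	return nil
}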
// StatQueue charts the command-queue size over time. With writeQ set it looks
// at the write-queue lines ("Queue command" / "Dequeue and execute"),
// otherwise at the read-only scheduler lines ("Scheduling the command ... Read-only").
func StatQueue(writeQ bool) error {
	var myresults results
	var wg sync.WaitGroup
	for id, sl := range record.RecordByThread {
		wg.Add(1)
		go func(tid string, rs []*record.Record) {
			defer wg.Done()
			for _, r := range rs {
				if !window.InCurrentWindow(r.Time) {
					continue
				}
				if len(r.Raw) < 151 {
					continue
				}
				// Search only r.Raw[60:150] to limit the scope, with reasonable margins.
				txt := strings.Split(r.Raw[60:150], ">")
				if len(txt) < 2 {
					continue
				}
				tokens := strings.Split(txt[1], " ")
				if len(tokens) < 3 {
					continue
				}
				if writeQ {
					if (tokens[1] == "Queue" && tokens[2] == "command") ||
						(len(tokens) > 3 && tokens[1] == "Dequeue" && tokens[2] == "and" && tokens[3] == "execute") {
						// 2015/08/04 15:19:59.904847 magap302 masterag-11298 MDW INFO <SEI_MAAdminSequence.cpp#223 TID#13> Queue command 0x2aacbf979400 [Command#14015: kSEIBELibLoaded : BE->MAG:Notify the Master Agent that a lib has been loaded (version 1)^@] (from BENT0UCL4VXODY to masterag); Command sequence queue size: 0
						backToken := strings.Split(r.Raw[len(r.Raw)-40:], " ")
						last := len(backToken) - 1
						txt1 := strings.Join(backToken[last-2:last], " ")
						if txt1 == "queue size:" {
							if s, err := strconv.Atoi(backToken[last]); err == nil {
								myresults.Lock()
								myresults.HasRecords = append(myresults.HasRecords, &result{qSize: s, tid: tid, r: r})
								myresults.Unlock()
							}
						}
					}
				} else {
					if len(tokens) > 6 && tokens[1] == "Scheduling" && tokens[3] == "command" && tokens[6] == "Read-only" {
						// 2015/12/03 05:07:12.577382 magap302 masterag-32357 MDW INFO <SEI_MAAdminSequence.cpp#196 TID#13> Scheduling the command in the Read-only command sequencer pool. Pool queue size: 393
						backToken := strings.Split(r.Raw[len(r.Raw)-40:], " ")
						last := len(backToken) - 1
						txt1 := strings.Join(backToken[last-2:last], " ")
						if txt1 == "queue size:" {
							if s, err := strconv.Atoi(backToken[last]); err == nil {
								myresults.Lock()
								myresults.HasRecords = append(myresults.HasRecords, &result{qSize: s, tid: tid, r: r})
								myresults.Unlock()
							}
						}
					}
				}
			}
		}(id, sl)
	}
	wg.Wait()

	if len(myresults.HasRecords) == 0 {
		return fmt.Errorf("no queue-size records found in the current window")
	}
	sort.Sort(record.ByHasRecordTime{myresults.HasRecords})

	// Build chart
	chart := tm.NewLineChart(tm.Width()-10, tm.Height()-10)
	data := new(tm.DataTable)
	data.AddColumn("Seconds")
	data.AddColumn("QMax")
	data.AddColumn("QMin")

	type prec struct {
		max int
		min int
	}
	t0 := myresults.HasRecords[0].(*result).r.Time
	tmax := myresults.HasRecords[len(myresults.HasRecords)-1].(*result).r.Time
	indexMax := int(tmax.Sub(t0).Seconds())
	qmaxTime := time.Unix(0, 0)
	qmax := 0
	// Per-second max/min queue size, keyed by seconds elapsed since the first record.
	m := make(map[int]prec)
	for _, v := range myresults.HasRecords {
		r := v.(*result)
		d := int(r.r.Time.Sub(t0).Seconds())
		p, ok := m[d]
		if !ok {
			p = prec{r.qSize, r.qSize}
		} else {
			if p.max < r.qSize {
				p.max = r.qSize
			}
			if p.min > r.qSize {
				p.min = r.qSize
			}
		}
		m[d] = p
		if p.max > qmax {
			qmax = p.max
			qmaxTime = r.GetRecord().Time
		}
	}
	for t := 0; t <= indexMax; t++ {
		data.AddRow(float64(t), float64(m[t].max), float64(m[t].min))
	}
	tm.Println(chart.Draw(data))
	tm.Flush()
	fmt.Printf("Qmax=%d at %s\n", qmax, qmaxTime.Format(utils.DateFormat))
	return nil
}
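// Supporting declarations (sketch): StatQueue relies on a small result type and
// a mutex-guarded results collector defined elsewhere in the package. The
// shapes below are inferred from how they are used above; in particular the
// record.HasRecord element type is an assumption.
//
//	type result struct {
//		qSize int            // queue size parsed from the tail of the log line
//		tid   string         // thread id the record belongs to
//		r     *record.Record // underlying log record
//	}
//
//	// GetRecord exposes the raw record for sorting and charting.
//	func (r *result) GetRecord() *record.Record { return r.r }
//
//	type results struct {
//		sync.Mutex                    // guards HasRecords across the worker goroutines
//		HasRecords []record.HasRecord // assumption: an interface satisfied by *result
//	}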
func main() {
	var (
		errCh        = make(chan error, 16)
		receivedCh   = make(chan int, 1024)
		sentCh       = make(chan int, 1024)
		sent         = 0
		lastSent     = 0
		received     = 0
		lastReceived = 0
	)

	// Read params (server address:port, QoS settings, number of publishers, number of subscribers).
	flag.Parse()

	// Generate topics in advance, so we can run the subscribers before starting to publish.
	for i := 0; i < *numTop; i++ {
		newTopic()
	}

	//
	// Subscribe to topics
	//
	// TODO: multiple subscribers.

	// Initialise a timeout (fires if no messages are received in the given time since the last publish).
	timeout := time.NewTimer(10 * time.Second)
	resetSubTimeout := func() {
		timeout.Reset(time.Duration(*subTimeout) * time.Second)
	}

	// Count received messages, discarding their payload.
	messageHandler := func(client *mqtt.Client, m mqtt.Message) {
		if string(m.Payload()) == "hello world" {
			receivedCh <- 1
			resetSubTimeout() // reset timeout
		}
	}

	// Prepare the subscription filter from topics and QoS.
	filters := map[string]byte{}
	for _, topic := range getTopics() {
		fmt.Printf("Created topic %s with qos %v\n", topic, *qos)
		filters[topic] = byte(*qos)
	}

	// Multiple subscribers.
	fmt.Println("Connecting subscribers...")
	for i := 0; i < *numSub; i++ {
		subscriber := newClient()
		if token := subscriber.client.Connect(); token.Wait() && token.Error() != nil {
			errCh <- token.Error()
		}
		defer subscriber.client.Disconnect(250)
		token := subscriber.client.SubscribeMultiple(filters, messageHandler)
		if token.Wait() && token.Error() != nil {
			errCh <- token.Error()
		}
	}

	//
	// Publish to topics
	//

	// Arm the timeout with the configured value.
	timeout = time.NewTimer(time.Duration(*subTimeout) * time.Second)
	go func() {
		for {
			<-timeout.C
			errCh <- fmt.Errorf("subscriber timeout: no more messages?")
		}
	}()

	// Publishers.
	for i := 0; i < *numPub; i++ {
		go func() {
			c := newClient()
			if token := c.client.Connect(); token.Wait() && token.Error() != nil {
				errCh <- token.Error()
			}
			defer c.client.Disconnect(100)

			// Publish (sequential per client).
			topics := getTopics()
			for k := 0; k < *numMessages; k++ {
				topic := topics[k%len(topics)]
				token := c.client.Publish(topic, byte(*qos), *retained, "hello world")
				if token.Wait() && token.Error() != nil {
					errCh <- token.Error()
				}
				sentCh <- 1
				time.Sleep(time.Duration(*pubDelay) * time.Millisecond)
			}
		}()
	}

	// Create the fancy output.
	// start := time.Now()
	interval := 100 * time.Millisecond
	if *nograph {
		interval = 1 * time.Second
	}
	redraw := time.NewTicker(interval)

	// Chart table.
	row := 0.0
	data := new(tm.DataTable)
	data.AddColumn("Time (sec)")
	data.AddColumn("Sent")
	data.AddColumn("Received")

	for {
		select {
		case <-redraw.C:
			if !*nograph {
				tm.Clear()
				tm.Printf("[Published : Received]: [%v : %v]\n\n", sent, received)
				row += 1.0
				data.AddRow(row/10.0, float64(sent), float64(received))
				tm.Print(tm.NewLineChart(tm.Width()-4, tm.Height()-6).Draw(data))
				tm.Flush()
			} else if sent != lastSent || received != lastReceived {
				fmt.Printf("[Published : Received]: [%v : %v]\n", sent, received)
				lastSent, lastReceived = sent, received
			}
		case e := <-errCh:
			fmt.Println(e.Error())
			return
		case s := <-sentCh:
			sent += s
		case r := <-receivedCh:
			received += r
		}
	}
}
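// Supporting declarations (sketch): main refers to a set of package-level flags
// plus the newClient, newTopic and getTopics helpers, all defined elsewhere in
// the package. The flag block below is a plausible shape for those definitions;
// the Go identifiers are the ones used above, but the command-line names and
// default values are assumptions.
//
//	var (
//		numTop      = flag.Int("topics", 10, "number of topics to generate")
//		numSub      = flag.Int("subscribers", 1, "number of subscriber clients")
//		numPub      = flag.Int("publishers", 1, "number of publisher clients")
//		numMessages = flag.Int("messages", 100, "messages each publisher sends")
//		qos         = flag.Int("qos", 0, "MQTT QoS level (0, 1 or 2)")
//		retained    = flag.Bool("retained", false, "publish with the retained flag set")
//		pubDelay    = flag.Int("delay", 10, "delay between publishes, in milliseconds")
//		subTimeout  = flag.Int("timeout", 10, "seconds without a received message before aborting")
//		nograph     = flag.Bool("nograph", false, "print plain counters instead of drawing the chart")
//	)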
// StatCmdDistribution displays the duration distribution for a given command.
func StatCmdDistribution(cmdName string) {
	for _, ss := range buildStats() {
		if ss.cmd != cmdName {
			continue
		}

		// Build chart
		chart := tm.NewLineChart(tm.Width()-10, tm.Height()-33)
		data := new(tm.DataTable)
		data.AddColumn("CmdCount")
		data.AddColumn("Duration")
		for i, rec := range ss.records {
			if d, err := rec.GetCmdDuration(); err != nil {
				data.AddRow(float64(i), -1.)
			} else {
				data.AddRow(float64(i), float64(d.Nanoseconds()/1000000))
			}
		}
		tm.Println(chart.Draw(data))
		tm.Flush()

		// Statistics for that particular command
		{
			w := new(tabwriter.Writer)
			w.Init(os.Stdout, 20, 0, 2, ' ', tabwriter.AlignRight)
			fmt.Fprintln(w, "Command\tcount\tmiss\tmin (ms)\tmax (ms)\tavg (ms)\t95% (ms)\t")
			c := int64(len(ss.records) - ss.incomplete)
			i95 := c * 95 / 100
			if c == 0 {
				c = 1
			}
			p95ms := int64(-1)
			if d95, err := ss.records[i95].GetCmdDuration(); err == nil {
				p95ms = d95.Nanoseconds() / 1000000
			}
			fmt.Fprintf(w, "%s\t%d\t%d\t%d\t%d\t%d\t%d\t\n",
				ss.cmd, len(ss.records), ss.incomplete,
				ss.min.Nanoseconds()/1000000, ss.max.Nanoseconds()/1000000,
				ss.sum.Nanoseconds()/1000000/c, p95ms)
			fmt.Fprintln(w)
			w.Flush()
		}

		// The 10 slowest occurrences
		{
			w := new(tabwriter.Writer)
			w.Init(os.Stdout, 5, 0, 2, ' ', tabwriter.TabIndent)
			fmt.Fprintln(w, "Duration(ms)\tcmd\t")
			min := 10
			if len(ss.records) < 10 {
				min = len(ss.records)
			}
			for i := 1; i <= min; i++ {
				rec := ss.records[len(ss.records)-i]
				if d, err := rec.GetCmdDuration(); err != nil {
					fmt.Fprintf(w, "-\t%s\t\n", rec.Raw)
				} else {
					fmt.Fprintf(w, "%d\t%s\t\n", d.Nanoseconds()/1000000, rec.Raw)
				}
			}
			fmt.Fprintln(w)
			w.Flush()
		}
		return
	}
}
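// Supporting declaration (sketch): buildStats() appears to return one aggregate
// per command with roughly the shape below. The type name and field types are
// inferred from how the fields are used in StatCmdDistribution and are
// assumptions, not the real declarations. The records slice looks to be sorted
// by duration ascending, since the 95th percentile is taken by index and the
// slowest entries are read from the end of the slice.
//
//	type cmdStat struct {
//		cmd        string           // command name, matched against cmdName
//		records    []*record.Record // apparently sorted by command duration, ascending
//		incomplete int              // records whose GetCmdDuration() returned an error
//		min        time.Duration    // fastest completed occurrence
//		max        time.Duration    // slowest completed occurrence
//		sum        time.Duration    // total duration over completed occurrences
//	}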