func showStats() {
	for range time.Tick(time.Second * 10) {
		log.Printf("c:%6d in:%s out:%s",
			atomic.LoadInt32(&clients),
			gofmt.ByteSize(atomic.LoadUint64(&bytesRecved)),
			gofmt.ByteSize(atomic.LoadUint64(&bytesSent)))
	}
}
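// The counters read above are package-level atomics updated by the serving
// path. A minimal sketch of that producing side, assuming a hypothetical
// per-connection handler (handleConn is illustrative, not in the original):
func handleConn(conn net.Conn) {
	atomic.AddInt32(&clients, 1)
	defer atomic.AddInt32(&clients, -1)

	buf := make([]byte, 4<<10)
	for {
		n, err := conn.Read(buf)
		atomic.AddUint64(&bytesRecved, uint64(n))
		if err != nil {
			return
		}
	}
}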
func (this *NetReceiverInput) reportStats(r engine.InputRunner) {
	globals := engine.Globals()
	for range r.Ticker() {
		globals.Printf("Total %s, speed: %s/s",
			gofmt.ByteSize(this.totalBytes),
			gofmt.ByteSize(this.periodBytes))

		// reset the per-period counter for the next tick
		this.periodBytes = 0
	}
}
func RunSysStats(startedAt time.Time, interval time.Duration) {
	const nsInMs uint64 = 1000 * 1000

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	var (
		ms           = new(runtime.MemStats)
		rusage       = &syscall.Rusage{}
		lastUserTime int64
		lastSysTime  int64
		userTime     int64
		sysTime      int64
		userCpuUtil  float64
		sysCpuUtil   float64
	)

	for range ticker.C {
		runtime.ReadMemStats(ms)
		syscall.Getrusage(syscall.RUSAGE_SELF, rusage)

		// Convert ru_utime/ru_stime (sec + usec) to nanoseconds so they are
		// directly comparable with the interval duration below.
		userTime = rusage.Utime.Sec*1000000000 + int64(rusage.Utime.Usec)*1000
		sysTime = rusage.Stime.Sec*1000000000 + int64(rusage.Stime.Usec)*1000
		userCpuUtil = float64(userTime-lastUserTime) * 100 / float64(interval)
		sysCpuUtil = float64(sysTime-lastSysTime) * 100 / float64(interval)

		lastUserTime = userTime
		lastSysTime = sysTime

		// average GC pause in ms; guard against division by zero before the first GC
		gcPauseAvgMs := uint64(0)
		if ms.NumGC > 0 {
			gcPauseAvgMs = ms.PauseTotalNs / (nsInMs * uint64(ms.NumGC))
		}

		log.Info("ver:%s, since:%s, go:%d, gc:%dms/%d=%d, heap:{%s, %s, %s, %s %s} cpu:{%3.2f%%us, %3.2f%%sy}",
			BuildId,
			time.Since(startedAt),
			runtime.NumGoroutine(),
			ms.PauseTotalNs/nsInMs,
			ms.NumGC,
			gcPauseAvgMs,
			gofmt.ByteSize(ms.HeapSys),      // bytes it has asked the operating system for
			gofmt.ByteSize(ms.HeapAlloc),    // bytes currently allocated in the heap
			gofmt.ByteSize(ms.HeapIdle),     // bytes in the heap that are unused
			gofmt.ByteSize(ms.HeapReleased), // bytes returned to the operating system, 5m for scavenger
			gofmt.Comma(int64(ms.HeapObjects)),
			userCpuUtil,
			sysCpuUtil)
	}
}
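// A minimal usage sketch: the stats loop runs for the life of the process, so
// it is typically launched once from startup code. startSysStats and the
// one-minute interval are illustrative, not part of the original source:
func startSysStats() {
	go RunSysStats(time.Now(), time.Minute)
}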
// @rest GET /v1/status
func (this *manServer) statusHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	log.Info("status %s(%s)", r.RemoteAddr, getHttpRemoteIp(r))

	output := make(map[string]interface{})
	output["options"] = Options
	output["loglevel"] = logLevel.String()
	output["manager"] = manager.Default.Dump()

	pubConns := int(atomic.LoadInt32(&this.gw.pubServer.activeConnN))
	subConns := int(atomic.LoadInt32(&this.gw.subServer.activeConnN))
	output["pubconn"] = strconv.Itoa(pubConns)
	output["subconn"] = strconv.Itoa(subConns)
	output["hh_appends"] = strconv.FormatInt(hh.Default.AppendN(), 10)
	output["hh_delivers"] = strconv.FormatInt(hh.Default.DeliverN(), 10)
	output["goroutines"] = strconv.Itoa(runtime.NumGoroutine())

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	output["heap"] = gofmt.ByteSize(mem.HeapSys).String()
	output["objects"] = gofmt.Comma(int64(mem.HeapObjects))

	b, err := json.MarshalIndent(output, "", "    ")
	if err != nil {
		log.Error("%s(%s) %v", r.RemoteAddr, getHttpRemoteIp(r), err)
		// don't write a nil body on marshal failure
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Write(b)
}
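// Fetching the endpoint from a Go client. The listen address is illustrative;
// the real one comes from the gateway's configuration:
func fetchStatus() ([]byte, error) {
	resp, err := http.Get("http://localhost:9193/v1/status")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	return ioutil.ReadAll(resp.Body)
}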
func (this *Mirror) makeMirror(c1, c2 *zk.ZkCluster) {
	pub, err := this.makePub(c2)
	swallow(err)

	topics, topicsChanges, err := c1.WatchTopics()
	swallow(err)

	log.Printf("topics: %+v", topics)
	if len(topics) == 0 {
		log.Println("empty topics")
		return
	}

	group := fmt.Sprintf("%s.%s._mirror_", c1.Name(), c2.Name())
	sub, err := this.makeSub(c1, group, topics)
	swallow(err)

	pumpStopper := make(chan struct{})
	go this.pump(sub, pub, pumpStopper)

LOOP:
	for {
		select {
		case <-topicsChanges:
			log.Println("topics changed, stopping pump...")
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopper             // await pump cleanup

			// refresh c1 topics
			topics, err = c1.Topics()
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			log.Printf("topics: %+v", topics)

			sub, err = this.makeSub(c1, group, topics)
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			go this.pump(sub, pub, pumpStopper)

		case <-this.quit:
			log.Println("awaiting pump cleanup...")
			<-pumpStopper

			log.Printf("total transferred: %s %smsgs",
				gofmt.ByteSize(this.transferBytes),
				gofmt.Comma(this.transferN))
			break LOOP
		}
	}

	pub.Close()
}
func runWatchdog(ticker *time.Ticker) {
	startTime := time.Now()
	ms := new(runtime.MemStats)
	for range ticker.C {
		runtime.ReadMemStats(ms)

		globals.Printf("ver:%s, tick:%ds goroutine:%d, mem:%s, elapsed:%s\n",
			BuildID,
			options.tick,
			runtime.NumGoroutine(),
			gofmt.ByteSize(ms.Alloc),
			time.Since(startTime))
	}
}
func (this *engineStats) Runtime() map[string]interface{} {
	runtime.ReadMemStats(this.memStats)

	s := make(map[string]interface{})
	s["goroutines"] = runtime.NumGoroutine()
	s["memory.allocated"] = gofmt.ByteSize(this.memStats.Alloc).String()
	// Mallocs/Frees are cumulative object counts, not byte sizes
	s["memory.mallocs"] = gofmt.Comma(int64(this.memStats.Mallocs))
	s["memory.frees"] = gofmt.Comma(int64(this.memStats.Frees))
	s["memory.last_gc"] = this.memStats.LastGC
	s["memory.gc.num"] = this.memStats.NumGC
	s["memory.gc.num_per_second"] = float64(this.memStats.NumGC) / time.Since(this.startedAt).Seconds()
	if this.memStats.NumGC > 0 {
		// guard against division by zero before the first GC
		s["memory.gc.num_freq"] = fmt.Sprintf("%.1fsec/gc",
			time.Since(this.startedAt).Seconds()/float64(this.memStats.NumGC))
	}
	s["memory.gc.total_pause"] = fmt.Sprintf("%dms", this.memStats.PauseTotalNs/uint64(time.Millisecond))
	s["memory.heap.alloc"] = gofmt.ByteSize(this.memStats.HeapAlloc).String()
	s["memory.heap.sys"] = gofmt.ByteSize(this.memStats.HeapSys).String()
	s["memory.heap.idle"] = gofmt.ByteSize(this.memStats.HeapIdle).String()
	s["memory.heap.released"] = gofmt.ByteSize(this.memStats.HeapReleased).String()
	s["memory.heap.objects"] = gofmt.Comma(int64(this.memStats.HeapObjects))
	s["memory.stack"] = gofmt.ByteSize(this.memStats.StackInuse).String()

	gcPausesMs := make([]string, 0, 20)
	for _, pauseNs := range this.memStats.PauseNs {
		// skip empty ring-buffer slots and sub-millisecond pauses
		if pauseNs < uint64(time.Millisecond) {
			continue
		}

		gcPausesMs = append(gcPausesMs, fmt.Sprintf("%dms", pauseNs/uint64(time.Millisecond)))
	}
	s["memory.gc.pauses"] = gcPausesMs
	s["mem"] = *this.memStats

	return s
}
func (this *routerStats) render(logger *log.Logger, elapsed int) {
	logger.Printf("Total:%10s %10s speed:%6s/s %10s/s max: %s/%s",
		gofmt.Comma(this.TotalProcessedMsgN),
		gofmt.ByteSize(this.TotalProcessedBytes),
		gofmt.Comma(int64(this.PeriodProcessedMsgN/int32(elapsed))),
		gofmt.ByteSize(this.PeriodProcessedBytes/int64(elapsed)),
		gofmt.ByteSize(this.PeriodMaxMsgBytes),
		gofmt.ByteSize(this.TotalMaxMsgBytes))
	logger.Printf("Input:%10s %10s speed:%6s/s %10s/s",
		gofmt.Comma(int64(this.PeriodInputMsgN)),
		gofmt.ByteSize(this.PeriodInputBytes),
		gofmt.Comma(int64(this.PeriodInputMsgN/int32(elapsed))),
		gofmt.ByteSize(this.PeriodInputBytes/int64(elapsed)))
}
func (this *Redis) render() {
	if !this.batchMode {
		termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	}

	this.mu.Lock()
	defer this.mu.Unlock()

	if this.topOrderAsc {
		sortutil.AscByField(this.topInfos, this.topOrderCols[this.topOrderColIdx])
	} else {
		sortutil.DescByField(this.topInfos, this.topOrderCols[this.topOrderColIdx])
	}

	sortCols := make([]string, len(this.topOrderCols))
	copy(sortCols, this.topOrderCols)
	for i, col := range sortCols {
		if col == this.selectedCol() {
			if this.topOrderAsc {
				sortCols[i] += " >"
			} else {
				sortCols[i] += " <"
			}
		}
	}

	var (
		lines = []string{fmt.Sprintf("Host|Port|%s", strings.Join(sortCols, "|"))}

		sumDbsize, sumConns, sumOps, sumMem, sumRx, sumTx, sumMaxMem int64
	)
	for i := 0; i < len(this.topInfos); i++ {
		info := this.topInfos[i]

		sumDbsize += info.dbsize
		sumConns += info.conns
		sumOps += info.ops
		sumMem += info.mem
		sumMaxMem += info.maxmem
		sumRx += info.rx * 1024 / 8
		sumTx += info.tx * 1024 / 8

		if info.ops >= this.beep {
			this.warnPorts[strconv.Itoa(info.port)] = struct{}{}
		}

		this.maxDbSize = max(this.maxDbSize, info.dbsize)
		this.maxConns = max(this.maxConns, info.conns)
		this.maxOps = max(this.maxOps, info.ops)
		this.maxMem = max(this.maxMem, info.mem)
		this.maxMaxMem = max(this.maxMaxMem, info.maxmem)
		this.maxRx = max(this.maxRx, info.rx*1024/8)
		this.maxTx = max(this.maxTx, info.tx*1024/8)
		if info.memp > this.maxMemp {
			this.maxMemp = info.memp
		}
		if info.trp > this.maxTrp {
			this.maxTrp = info.trp
		}

		if i >= min(this.rows, len(this.topInfos)) {
			continue
		}

		l := fmt.Sprintf("%s|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
			info.host, info.port,
			gofmt.Comma(info.dbsize), gofmt.Comma(info.conns), gofmt.Comma(info.ops),
			gofmt.ByteSize(info.mem), gofmt.ByteSize(info.maxmem),
			100.*info.memp,
			gofmt.ByteSize(info.rx*1024/8), gofmt.ByteSize(info.tx*1024/8),
			info.trp)
		if this.beep > 0 {
			var val int64
			switch this.selectedCol() {
			case "conns":
				val = info.conns
			case "rx":
				val = info.rx
			case "tx":
				val = info.tx
			case "dbsize":
				val = info.dbsize
			case "ops":
				val = info.ops
			case "mem":
				val = info.mem
			case "maxm":
				val = info.maxmem
			}

			if val > this.beep {
				//l += "\a"
			}
		}

		lines = append(lines, l)
	}
	lines = append(lines, fmt.Sprintf("-MAX-|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
		len(this.topInfos),
		gofmt.Comma(this.maxDbSize), gofmt.Comma(this.maxConns), gofmt.Comma(this.maxOps),
		gofmt.ByteSize(this.maxMem), gofmt.ByteSize(this.maxMaxMem),
		100.*this.maxMemp,
		gofmt.ByteSize(this.maxRx), gofmt.ByteSize(this.maxTx),
		this.maxTrp))
	lines = append(lines, fmt.Sprintf("-TOTAL-|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
		len(this.topInfos),
		gofmt.Comma(sumDbsize), gofmt.Comma(sumConns), gofmt.Comma(sumOps),
		gofmt.ByteSize(sumMem), gofmt.ByteSize(sumMaxMem),
		100.*float64(sumMem)/float64(sumMaxMem),
		gofmt.ByteSize(sumRx), gofmt.ByteSize(sumTx),
		float64(sumTx)/float64(sumRx)))

	for row, line := range lines {
		if row == 0 {
			// header
			this.drawRow(line, row, termbox.ColorDefault, termbox.ColorBlue)
		} else if row == len(lines)-2 {
			// max line
			this.drawRow(line, row, termbox.ColorMagenta, termbox.ColorDefault)
		} else if row == len(lines)-1 {
			// total line
			this.drawRow(line, row, termbox.ColorYellow, termbox.ColorDefault)
		} else {
			tuples := strings.Split(line, "|")
			if _, present := this.selectedPorts[tuples[1]]; present {
				this.drawRow(line, row, termbox.ColorBlack, termbox.ColorCyan)
			} else if _, present := this.warnPorts[tuples[1]]; !present {
				this.drawRow(line, row, termbox.ColorDefault, termbox.ColorDefault)
			} else {
				this.drawRow(line, row, termbox.ColorDefault, termbox.ColorRed)
			}
		}
	}

	if !this.batchMode {
		termbox.Flush()
	}
}
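// A minimal sketch of the event loop that would drive render() above,
// assuming termbox-go. uiLoop, the refresh parameter, and the Esc binding
// are illustrative, not part of the original source:
func (this *Redis) uiLoop(refresh time.Duration) {
	tick := time.NewTicker(refresh)
	defer tick.Stop()

	events := make(chan termbox.Event)
	go func() {
		for {
			events <- termbox.PollEvent()
		}
	}()

	for {
		select {
		case <-tick.C:
			this.render()
		case ev := <-events:
			if ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
				return
			}
		}
	}
}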
func (this *Mirror) pump(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer, stop, stopped chan struct{}) {
	defer func() {
		log.Trace("closing sub, commit offsets...")
		sub.Close()

		stopped <- struct{}{} // notify others I'm done
	}()

	active := true
	backoff := time.Second * 2
	idle := time.Second * 10
	for {
		select {
		case <-this.quit:
			log.Trace("got signal quit")
			return

		case <-stop:
			// yes sir!
			log.Trace("got signal stop")
			return

		case <-time.After(idle):
			active = false
			log.Info("idle 10s waiting for new message")

		case msg, ok := <-sub.Messages():
			if !ok {
				log.Warn("sub encounters end of message stream")
				return
			}

			if !active || this.Debug {
				log.Info("<-[#%d] T:%s M:%s", this.transferN, msg.Topic, string(msg.Value))
			}
			active = true

			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			if this.AutoCommit {
				sub.CommitUpto(msg)
			}

			// rate limit: never flood the limited bandwidth between IDCs
			// FIXME when compressed, the bandwidth calculation is wrong
			bytesN := len(msg.Topic) + len(msg.Key) + len(msg.Value) + 20 // payload overhead
			if this.bandwidthRateLimiter != nil && !this.bandwidthRateLimiter.Pour(bytesN) {
				log.Warn("%s -> bandwidth reached, backoff %s", gofmt.ByteSize(this.transferBytes), backoff)
				time.Sleep(backoff)
			}

			this.transferBytes += int64(bytesN)
			this.transferN++
			if this.transferN%this.ProgressStep == 0 {
				log.Trace("%s %s %s", gofmt.Comma(this.transferN), gofmt.ByteSize(this.transferBytes), msg.Topic)
			}

		case err := <-sub.Errors():
			log.Error("quitting pump %v", err)
			return
		}
	}
}
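// bandwidthRateLimiter comes from the project's own rate-limiter package. As
// an illustration of the Pour(n) contract the pump relies on (consume n
// tokens when available, report false when the budget is exhausted), here is
// a hypothetical stdlib-only token bucket; it is NOT the original
// implementation:
type tokenBucket struct {
	mu       sync.Mutex
	tokens   int64     // tokens currently available
	capacity int64     // bucket size, e.g. bytes allowed per burst
	rate     int64     // tokens added per second
	lastFill time.Time // last refill time
}

func (b *tokenBucket) Pour(n int) bool {
	b.mu.Lock()
	defer b.mu.Unlock()

	// refill proportionally to the time elapsed since the last call
	now := time.Now()
	b.tokens += int64(now.Sub(b.lastFill).Seconds() * float64(b.rate))
	if b.tokens > b.capacity {
		b.tokens = b.capacity
	}
	b.lastFill = now

	if b.tokens < int64(n) {
		return false // budget exhausted: caller should back off
	}
	b.tokens -= int64(n)
	return true
}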
func (this *Peek) Run(args []string) (exitCode int) {
	var (
		cluster      string
		zone         string
		topicPattern string
		partitionId  int
		wait         time.Duration
		silence      bool
	)
	cmdFlags := flag.NewFlagSet("peek", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&cluster, "c", "", "")
	cmdFlags.StringVar(&topicPattern, "t", "", "")
	cmdFlags.IntVar(&partitionId, "p", 0, "")
	cmdFlags.BoolVar(&this.colorize, "color", true, "")
	cmdFlags.Int64Var(&this.lastN, "last", -1, "")
	cmdFlags.BoolVar(&this.pretty, "pretty", false, "")
	cmdFlags.IntVar(&this.limit, "n", -1, "")
	cmdFlags.StringVar(&this.column, "col", "", "") // TODO support multiple columns
	cmdFlags.BoolVar(&this.beep, "beep", false, "")
	cmdFlags.Int64Var(&this.offset, "offset", sarama.OffsetNewest, "")
	cmdFlags.BoolVar(&silence, "s", false, "")
	cmdFlags.DurationVar(&wait, "d", time.Hour, "")
	cmdFlags.BoolVar(&this.bodyOnly, "body", false, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.pretty {
		this.bodyOnly = true
	}

	this.quit = make(chan struct{})

	// declared outside the if so the select loop below can reference it
	stats := newPeekStats()
	if silence {
		go stats.start()
	}

	zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
	msgChan := make(chan *sarama.ConsumerMessage, 20000) // msg aggregator channel
	if cluster == "" {
		zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
			this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
		})
	} else {
		zkcluster := zkzone.NewCluster(cluster)
		this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
	}

	signal.RegisterHandler(func(sig os.Signal) {
		log.Printf("received signal: %s", strings.ToUpper(sig.String()))
		log.Println("quitting...")

		this.once.Do(func() {
			close(this.quit)
		})
	}, syscall.SIGINT, syscall.SIGTERM)

	var (
		startAt = time.Now()
		msg     *sarama.ConsumerMessage
		total   int
		bytesN  int64
	)

	var (
		j          map[string]interface{}
		prettyJSON bytes.Buffer
	)

LOOP:
	for {
		if time.Since(startAt) >= wait {
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return
		}

		select {
		case <-this.quit:
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return

		case <-time.After(time.Second):
			continue

		case msg = <-msgChan:
			if silence {
				stats.MsgCountPerSecond.Mark(1)
				stats.MsgBytesPerSecond.Mark(int64(len(msg.Value)))
			} else {
				var outmsg string
				if this.column != "" {
					if err := json.Unmarshal(msg.Value, &j); err != nil {
						this.Ui.Error(err.Error())
					} else {
						var colVal string
						switch t := j[this.column].(type) {
						case string:
							colVal = t
						case float64:
							colVal = fmt.Sprintf("%.0f", t)
						case int:
							colVal = fmt.Sprintf("%d", t)
						}

						if this.bodyOnly {
							if this.pretty {
								prettyJSON.Reset() // json.Indent appends, so clear leftovers from the previous message
								if err = json.Indent(&prettyJSON, []byte(colVal), "", "    "); err != nil {
									fmt.Println(err.Error())
								} else {
									outmsg = prettyJSON.String()
								}
							} else {
								outmsg = colVal
							}
						} else if this.colorize {
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								color.Green(msg.Topic), msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						} else {
							// colored UI will have invisible chars output
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								msg.Topic, msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						}
					}
				} else {
					if this.bodyOnly {
						if this.pretty {
							prettyJSON.Reset() // json.Indent appends, so clear leftovers from the previous message
							json.Indent(&prettyJSON, msg.Value, "", "    ")
							outmsg = prettyJSON.String()
						} else {
							outmsg = string(msg.Value)
						}
					} else if this.colorize {
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							color.Green(msg.Topic), msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					} else {
						// colored UI will have invisible chars output
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							msg.Topic, msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					}
				}

				if outmsg != "" {
					if this.beep {
						outmsg += "\a"
					}

					this.Ui.Output(outmsg)
				}
			}

			total++
			bytesN += int64(len(msg.Value))

			if this.limit > 0 && total >= this.limit {
				break LOOP
			}
			if this.lastN > 0 && total >= int(this.lastN) {
				break LOOP
			}
		}
	}

	return
}
func (this *Segment) printSummary() {
	segments := make(map[string]map[int]map[int]int64) // dir:day:hour:size
	err := filepath.Walk(this.rootPath, func(path string, f os.FileInfo, err error) error {
		if f == nil {
			return err
		}

		if f.IsDir() || !this.isKafkaLogSegment(f.Name()) {
			return nil
		}

		dir := filepath.Base(filepath.Dir(path))
		if _, present := segments[dir]; !present {
			segments[dir] = make(map[int]map[int]int64)
		}
		if _, present := segments[dir][f.ModTime().Day()]; !present {
			segments[dir][f.ModTime().Day()] = make(map[int]int64)
		}
		segments[dir][f.ModTime().Day()][f.ModTime().Hour()] += f.Size()

		return nil
	})
	if err != nil {
		this.Ui.Error(err.Error())
	}

	partitions := make([]string, 0, len(segments))
	for dir := range segments {
		partitions = append(partitions, dir)
	}
	sort.Strings(partitions)

	type segment struct {
		partition string
		day       int
		hour      int
		size      int64
	}

	var maxSegment segment
	var totalSize int64
	for _, p := range partitions {
		summary := make([]segment, 0)
		for day, hourSize := range segments[p] {
			for hour, size := range hourSize {
				summary = append(summary, segment{
					partition: p,
					day:       day,
					hour:      hour,
					size:      size,
				})
			}
		}
		sortutil.AscByField(summary, "size")
		if this.limit > 0 && len(summary) > this.limit {
			summary = summary[:this.limit]
		}

		for _, s := range summary {
			if s.size > maxSegment.size {
				maxSegment = s
			}

			totalSize += s.size

			this.Ui.Output(fmt.Sprintf("%50s day:%2d hour:%2d size:%s",
				p, s.day, s.hour, gofmt.ByteSize(s.size)))
		}
	}

	this.Ui.Output(fmt.Sprintf("%50s day:%2d hour:%2d size:%s",
		"MAX-"+maxSegment.partition, maxSegment.day, maxSegment.hour,
		gofmt.ByteSize(maxSegment.size)))
	this.Ui.Output(fmt.Sprintf("%50s %s", "-TOTAL-", gofmt.ByteSize(totalSize)))
}
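// isKafkaLogSegment is defined elsewhere in this source. A hypothetical sketch
// of what such a check could look like, based on Kafka's on-disk naming
// convention where each log segment is named by its 20-digit zero-padded base
// offset; this is an illustration, not the original helper:
var kafkaLogSegmentRe = regexp.MustCompile(`^\d{20}\.log$`)

func isKafkaLogSegmentName(name string) bool {
	return kafkaLogSegmentRe.MatchString(name)
}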
func (this *Mirror) runMirror(c1, c2 *zk.ZkCluster, limit int64) {
	this.startedAt = time.Now()

	log.Info("start [%s/%s] -> [%s/%s] with bandwidth %sbps",
		c1.ZkZone().Name(), c1.Name(),
		c2.ZkZone().Name(), c2.Name(),
		gofmt.Comma(limit*8))

	pub, err := this.makePub(c2)
	if err != nil {
		panic(err)
	}

	log.Trace("pub[%s/%s] created", c2.ZkZone().Name(), c2.Name())

	go func(pub sarama.AsyncProducer, c *zk.ZkCluster) {
		for {
			select {
			case <-this.quit:
				return

			case err := <-pub.Errors():
				// messages will only be returned here after all retry attempts are exhausted.
				//
				// e.g.
				// Failed to produce message to topic xx: write tcp src->kfk: i/o timeout
				// kafka: broker not connected
				log.Error("pub[%s/%s] %v", c.ZkZone().Name(), c.Name(), err)
			}
		}
	}(pub, c2)

	group := this.groupName(c1, c2)
	ever := true
	round := 0
	for ever {
		round++

		topics, topicsChanges, err := c1.WatchTopics()
		if err != nil {
			log.Error("#%d [%s/%s] watch topics: %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		topics = this.realTopics(topics)
		sub, err := this.makeSub(c1, group, topics)
		if err != nil {
			log.Error("#%d [%s/%s] %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		log.Info("#%d starting pump [%s/%s] -> [%s/%s] %d topics with group %s for %+v",
			round,
			c1.ZkZone().Name(), c1.Name(),
			c2.ZkZone().Name(), c2.Name(),
			len(topics), group, topics)

		pumpStopper := make(chan struct{})
		pumpStopped := make(chan struct{})
		go this.pump(sub, pub, pumpStopper, pumpStopped)

		select {
		case <-topicsChanges:
			// TODO log the diff of the topics
			log.Warn("#%d [%s/%s] topics changed, stopping pump...", round, c1.Name(), c2.Name())
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopped             // await pump cleanup

		case <-this.quit:
			log.Info("#%d awaiting pump cleanup...", round)
			<-pumpStopped

			ever = false

		case <-pumpStopped:
			// pump encountered problems, just retry
		}
	}

	log.Info("total transferred: %s %smsgs",
		gofmt.ByteSize(this.transferBytes),
		gofmt.Comma(this.transferN))

	log.Info("closing pub...")
	pub.Close()
}
func (this *Histogram) showNetworkGrowth() ([]time.Time, []int64, []int64) {
	f, err := os.OpenFile(this.networkFile, os.O_RDONLY, 0660)
	swallow(err)
	defer f.Close()

	ts := make([]time.Time, 0)
	rx := make([]int64, 0)
	tx := make([]int64, 0)

	r := bufio.NewReader(f)
	var (
		lastRx           = int64(0)
		lastTx           = int64(0)
		rxTotal, txTotal int64
		tm               string
	)

	// emit the datapoint accumulated for the current timestamp block
	flush := func() {
		t, e := time.Parse("Mon Jan 2 15:04:05 MST 2006", tm)
		swallow(e)

		ts = append(ts, t)
		rx = append(rx, rxTotal-lastRx)
		tx = append(tx, txTotal-lastTx)

		this.Ui.Output(fmt.Sprintf("%55s RX+:%10s/%-10s TX+:%10s/%-10s", tm,
			gofmt.ByteSize(rxTotal-lastRx), gofmt.ByteSize(lastRx),
			gofmt.ByteSize(txTotal-lastTx), gofmt.ByteSize(lastTx)))
	}

	for {
		line, err := r.ReadString('\n')
		if err == io.EOF {
			if lastRx > 0 {
				flush()
			}
			break
		}

		line = strings.TrimSpace(line)
		if !strings.Contains(line, "bytes") {
			// time info line, e.g: Thu Jun 16 22:45:01 CST 2016
			if lastRx > 0 {
				flush()
			}

			tm = line
			lastRx = rxTotal
			lastTx = txTotal
			rxTotal = 0
			txTotal = 0
		} else {
			// host line, e.g: CDM1C01-209018015: bytes:98975866482403 bytes:115679008715688
			parts := strings.Split(line, " ")
			//host := strings.TrimRight(parts[0], ":")
			rxBytes := strings.Split(parts[1], ":")[1]
			txBytes := strings.Split(parts[2], ":")[1]

			n, err := strconv.ParseInt(rxBytes, 10, 64)
			swallow(err)
			rxTotal += n

			n, err = strconv.ParseInt(txBytes, 10, 64)
			swallow(err)
			txTotal += n
		}
	}

	return ts, rx, tx
}