Example #1
func (this *Pps) showPps(nic string) {
	tx := fmt.Sprintf("/sys/class/net/%s/statistics/tx_packets", nic)
	rx := fmt.Sprintf("/sys/class/net/%s/statistics/rx_packets", nic)

	var lastTx, lastRx int64
	s := int64(this.interval.Seconds())
	if s <= 0 {
		s = 1 // a sub-second interval would make the pps division below panic
	}
	for {
		brx, err := ioutil.ReadFile(rx)
		swallow(err)
		btx, err := ioutil.ReadFile(tx)
		swallow(err)

		rxN, err := strconv.ParseInt(strings.TrimSpace(string(brx)), 10, 64)
		swallow(err)
		txN, err := strconv.ParseInt(strings.TrimSpace(string(btx)), 10, 64)
		swallow(err)

		if lastRx != 0 && lastTx != 0 {
			rxPps := (rxN - lastRx) / s
			txPps := (txN - lastTx) / s
			sumPps := rxPps + txPps

			this.Ui.Output(fmt.Sprintf("%10s rx:%-8s tx:%-8s sum:%-8s",
				nic, gofmt.Comma(rxPps), gofmt.Comma(txPps), gofmt.Comma(sumPps)))
		}

		lastRx = rxN
		lastTx = txN

		time.Sleep(this.interval)
	}
}
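Two helpers recur in every example here but are never defined: swallow, called after each fallible operation, and the gofmt formatting package (Comma, ByteSize, PrettySince). The sketch below is an assumption reconstructed from the call sites, not the library source; the import path and the sample values are guesses.

package main

import (
	"fmt"

	"github.com/funkygao/golib/gofmt" // import path assumed, not confirmed by these examples
)

// swallow is never shown in the examples; since callers use results
// immediately after the call, it presumably panics on non-nil errors.
func swallow(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	// gofmt.Comma groups an int64 with thousands separators, e.g. "1,234,567"
	fmt.Println(gofmt.Comma(1234567))

	// gofmt.ByteSize humanizes a byte count and implements fmt.Stringer
	fmt.Println(gofmt.ByteSize(10 * 1024 * 1024).String())
}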
Example #2
func (this *Clusters) printSummary(zkzone *zk.ZkZone, clusterPattern string, port string) {
	lines := []string{"Zone|Cluster|Brokers|Topics|Partitions|FlatMsg|Cum"}

	type summary struct {
		zone, cluster               string
		brokers, topics, partitions int
		flat, cum                   int64
	}
	summaries := make([]summary, 0, 10)
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		brokers, topics, partitions, flat, cum := this.clusterSummary(zkcluster)
		summaries = append(summaries, summary{zkzone.Name(), zkcluster.Name(), brokers, topics, partitions,
			flat, cum})

	})
	sortutil.DescByField(summaries, "cum")
	var totalFlat, totalCum int64
	for _, s := range summaries {
		lines = append(lines, fmt.Sprintf("%s|%s|%d|%d|%d|%s|%s",
			s.zone, s.cluster, s.brokers, s.topics, s.partitions,
			gofmt.Comma(s.flat), gofmt.Comma(s.cum)))

		totalCum += s.cum
		totalFlat += s.flat
	}
	this.Ui.Output(columnize.SimpleFormat(lines))
	this.Ui.Output(fmt.Sprintf("Flat:%s Cum:%s", gofmt.Comma(totalFlat), gofmt.Comma(totalCum)))
}
Example #3
func (this *FunServantImpl) showStats() {
	ticker := time.NewTicker(config.Engine.Servants.StatsOutputInterval)
	defer ticker.Stop()

	for range ticker.C {
		log.Info("svt: {slow:%s peer.from:%s, peer.to:%s}",
			gofmt.Comma(svtStats.callsSlow),
			gofmt.Comma(svtStats.callsFromPeer),
			gofmt.Comma(svtStats.callsToPeer))
	}
}
Example #4
func (this *routerStats) render(logger *log.Logger, elapsed int) {
	logger.Printf("Total:%10s %10s speed:%6s/s %10s/s max: %s/%s",
		gofmt.Comma(this.TotalProcessedMsgN),
		gofmt.ByteSize(this.TotalProcessedBytes),
		gofmt.Comma(int64(this.PeriodProcessedMsgN/int32(elapsed))),
		gofmt.ByteSize(this.PeriodProcessedBytes/int64(elapsed)),
		gofmt.ByteSize(this.PeriodMaxMsgBytes),
		gofmt.ByteSize(this.TotalMaxMsgBytes))
	logger.Printf("Input:%10s %10s speed:%6s/s %10s/s",
		gofmt.Comma(int64(this.PeriodInputMsgN)),
		gofmt.ByteSize(this.PeriodInputBytes),
		gofmt.Comma(int64(this.PeriodInputMsgN/int32(elapsed))),
		gofmt.ByteSize(this.PeriodInputBytes/int64(elapsed)))
}
Example #5
func (this *Histogram) showOffsetGrowth() ([]time.Time, []int64) {
	f, err := os.OpenFile(this.offsetFile, os.O_RDONLY, 0660)
	swallow(err)
	defer f.Close()

	ts := make([]time.Time, 0)
	vs := make([]int64, 0)

	r := bufio.NewReader(f)
	var (
		lastN = int64(0)
		tm    string
	)

	for {
		line, err := r.ReadString('\n')
		if err == io.EOF {
			break
		}

		line = strings.TrimSpace(line)

		if !strings.Contains(line, "CUM Messages") {
			// time info: Thu Jun 16 22:45:01 CST 2016
			tm = line
		} else {
			// offset:            -CUM Messages- 255,705,684,384
			n := strings.Split(line, "-CUM Messages-")[1]
			n = strings.Replace(n, ",", "", -1)
			n = strings.TrimSpace(n)
			offset, err := strconv.ParseInt(n, 10, 64)
			swallow(err)
			if lastN > 0 {
				t, e := time.Parse("Mon Jan 2 15:04:05 MST 2006", tm)
				swallow(e)
				ts = append(ts, t)
				vs = append(vs, offset-lastN)

				this.Ui.Output(fmt.Sprintf("%55s Message+ %15s/%s", tm,
					gofmt.Comma(offset-lastN), gofmt.Comma(lastN)))
			}

			lastN = offset
		}
	}

	return ts, vs
}
Example #6
func (this *TFunServer) showStats(interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for range ticker.C {
		log.Info("rpc: {session.on:%d/%s, call.err:%s/%s, qps:{1m:%.0f, 5m:%.0f 15m:%.0f avg:%.0f}}",
			atomic.LoadInt64(&this.activeSessionN),
			gofmt.Comma(atomic.LoadInt64(&this.cumSessions)),
			gofmt.Comma(atomic.LoadInt64(&this.cumCallErrs)),
			gofmt.Comma(atomic.LoadInt64(&this.cumCalls)),
			this.stats.CallPerSecond.Rate1(),
			this.stats.CallPerSecond.Rate5(),
			this.stats.CallPerSecond.Rate15(),
			this.stats.CallPerSecond.RateMean())
	}
}
Example #7
// @rest GET /v1/status
func (this *manServer) statusHandler(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
	log.Info("status %s(%s)", r.RemoteAddr, getHttpRemoteIp(r))

	output := make(map[string]interface{})
	output["options"] = Options
	output["loglevel"] = logLevel.String()
	output["manager"] = manager.Default.Dump()
	pubConns := int(atomic.LoadInt32(&this.gw.pubServer.activeConnN))
	subConns := int(atomic.LoadInt32(&this.gw.subServer.activeConnN))
	output["pubconn"] = strconv.Itoa(pubConns)
	output["subconn"] = strconv.Itoa(subConns)
	output["hh_appends"] = strconv.FormatInt(hh.Default.AppendN(), 10)
	output["hh_delivers"] = strconv.FormatInt(hh.Default.DeliverN(), 10)
	output["goroutines"] = strconv.Itoa(runtime.NumGoroutine())

	var mem runtime.MemStats
	runtime.ReadMemStats(&mem)
	output["heap"] = gofmt.ByteSize(mem.HeapSys).String()
	output["objects"] = gofmt.Comma(int64(mem.HeapObjects))

	b, err := json.MarshalIndent(output, "", "    ")
	if err != nil {
		log.Error("%s(%s) %v", r.RemoteAddr, getHttpRemoteIp(r), err)
		w.WriteHeader(http.StatusInternalServerError) // don't answer 200 with an empty body
		return
	}

	w.Write(b)
}
Example #8
func main() {
	cf := config.NewDefaultProxy()
	cf.IoTimeout = time.Hour
	cf.TcpNoDelay = tcpNoDelay
	prx := proxy.New(cf)

	etclib.Dial([]string{zk})
	go prx.StartMonitorCluster()
	prx.AwaitClusterTopologyReady()

	// test pool
	if testPool {
		testServantPool(prx)
		pause("pool tested")
	}

	go report.run()

	wg := new(sync.WaitGroup)
	t1 := time.Now()
	for k := c1; k <= c2; k += 10 {
		Concurrency = k

		cf.PoolCapacity = Concurrency
		prx = proxy.New(cf)

		for i := 0; i < Rounds; i++ {
			for j := 0; j < k; j++ {
				wg.Add(1)
				go runSession(prx, wg, i+1, j)
			}

			wg.Wait()
		}
	}

	elapsed := time.Since(t1)
	log.Printf("Elapsed: %s, calls: {%s, %.1f/s}, sessions: {%s, %.1f/s}, errors: {conn:%d, io:%d call:%d}",
		elapsed,
		gofmt.Comma(report.callOk),
		float64(report.callOk)/elapsed.Seconds(),
		gofmt.Comma(int64(report.sessionN)),
		float64(report.sessionN)/elapsed.Seconds(),
		report.connErrs,
		report.ioErrs,
		report.callErrs)
}
Example #9
func (this *Haproxy) getStats(statsUri string) (header string, rows []string) {
	client := http.Client{Timeout: time.Second * 30}
	resp, err := client.Get(statsUri)
	swallow(err)
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		swallow(fmt.Errorf("fetch[%s] stats got status: %d", statsUri, resp.StatusCode))
	}

	var records map[string]map[string]int64
	reader := json.NewDecoder(resp.Body)
	err = reader.Decode(&records)
	swallow(err)

	u, err := url.Parse(statsUri)
	swallow(err)
	var shortHostname string
	if strings.Contains(u.Host, ":") {
		u.Host = u.Host[:strings.Index(u.Host, ":")]
	}
	tuples := strings.SplitN(u.Host, ".", 4)
	if len(tuples) < 4 {
		shortHostname = u.Host
	} else {
		shortHostname = tuples[3]
	}
	if len(shortHostname) > 8 {
		shortHostname = shortHostname[:8]
	}

	sortedSvcs := make([]string, 0)
	for svc := range records {
		sortedSvcs = append(sortedSvcs, svc)
	}
	sort.Strings(sortedSvcs)

	sortedCols := make([]string, 0)
	for k := range records["pub"] {
		sortedCols = append(sortedCols, k)
	}
	sort.Strings(sortedCols)

	header = strings.Join(append([]string{"host", "svc"}, sortedCols...), "|")
	for _, svc := range sortedSvcs {
		stats := records[svc]

		var vals = []string{shortHostname, svc}
		for _, k := range sortedCols {
			v := stats[k]

			vals = append(vals, gofmt.Comma(v))
		}

		rows = append(rows, strings.Join(vals, "|"))
	}

	return
}
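getStats assumes the stats endpoint returns JSON keyed first by service name, then by counter name, with int64 values. Below is a minimal, self-contained sketch of that decode-and-columnize step; the payload and its counter names are hypothetical, chosen only to show the shape.

package main

import (
	"encoding/json"
	"fmt"
	"sort"
	"strings"
)

func main() {
	// hypothetical payload in the shape getStats expects: {service: {counter: value}}
	payload := []byte(`{"pub": {"call.ok": 120453, "call.err": 7}}`)

	var records map[string]map[string]int64
	if err := json.Unmarshal(payload, &records); err != nil {
		panic(err)
	}

	// sort counter names so every row lines up under one header,
	// mirroring the sortedCols logic above
	cols := make([]string, 0, len(records["pub"]))
	for k := range records["pub"] {
		cols = append(cols, k)
	}
	sort.Strings(cols)

	fmt.Println(strings.Join(append([]string{"svc"}, cols...), "|"))
	row := []string{"pub"}
	for _, c := range cols {
		row = append(row, fmt.Sprintf("%d", records["pub"][c]))
	}
	fmt.Println(strings.Join(row, "|"))
}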
Example #10
func (this *Mirror) pump(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer, stop chan struct{}) {
	defer func() {
		log.Println("pump cleanup...")
		sub.Close()
		log.Println("pump cleanup ok")

		stop <- struct{}{} // notify others I'm done
	}()

	log.Printf("start pumping")
	active := false
	for {
		select {
		case <-this.quit:
			return

		case <-stop:
			// yes sir!
			return

		case <-time.After(time.Second * 10):
			active = false
			log.Println("idle 10s waiting for new msg")

		case msg := <-sub.Messages():
			if !active || this.debug {
				log.Printf("<-[%d] T:%s M:%s", this.transferN, msg.Topic, string(msg.Value))
			}
			active = true

			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			if this.autoCommit {
				sub.CommitUpto(msg)
			}

			// rate limit, never overflood the limited bandwidth between IDCs
			// FIXME when compressed, the bandwidth calculation is wrong
			bytesN := len(msg.Topic) + len(msg.Key) + len(msg.Value) + 20 // payload overhead
			if !this.bandwidthRateLimiter.Pour(bytesN) {
				time.Sleep(time.Second)
				this.Ui.Warn(fmt.Sprintf("%d -> bandwidth reached, backoff 1s", bytesN))
			}
			this.transferBytes += int64(bytesN)

			this.transferN++
			if this.transferN%this.progressStep == 0 {
				log.Println(gofmt.Comma(this.transferN))
			}

		case err := <-sub.Errors():
			this.Ui.Error(err.Error()) // TODO
		}
	}
}
Example #11
func (this *Mirror) makeMirror(c1, c2 *zk.ZkCluster) {
	pub, err := this.makePub(c2)
	swallow(err)

	topics, topicsChanges, err := c1.WatchTopics()
	swallow(err)

	log.Printf("topics: %+v", topics)
	if len(topics) == 0 {
		log.Println("empty topics")
		return
	}

	group := fmt.Sprintf("%s.%s._mirror_", c1.Name(), c2.Name())
	sub, err := this.makeSub(c1, group, topics)
	swallow(err)

	pumpStopper := make(chan struct{})
	go this.pump(sub, pub, pumpStopper)

LOOP:
	for {
		select {
		case <-topicsChanges:
			log.Println("topics changed, stopping pump...")
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopper             // await pump cleanup

			// refresh c1 topics
			topics, err = c1.Topics()
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			log.Printf("topics: %+v", topics)

			sub, err = this.makeSub(c1, group, topics)
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			go this.pump(sub, pub, pumpStopper)

		case <-this.quit:
			log.Println("awaiting pump cleanup...")
			<-pumpStopper
			log.Printf("total transferred: %s %smsgs",
				gofmt.ByteSize(this.transferBytes),
				gofmt.Comma(this.transferN))
			break LOOP
		}
	}

	pub.Close()
}
Example #12
func (this *Mirror) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("mirror", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone1, "z1", "", "")
	cmdFlags.StringVar(&this.zone2, "z2", "", "")
	cmdFlags.StringVar(&this.cluster1, "c1", "", "")
	cmdFlags.StringVar(&this.cluster2, "c2", "", "")
	cmdFlags.StringVar(&this.excludes, "excluded", "", "")
	cmdFlags.BoolVar(&this.debug, "debug", false, "")
	cmdFlags.StringVar(&this.compress, "compress", "", "")
	cmdFlags.Int64Var(&this.bandwidthLimit, "net", 100, "")
	cmdFlags.BoolVar(&this.autoCommit, "commit", true, "")
	cmdFlags.Int64Var(&this.progressStep, "step", 5000, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if validateArgs(this, this.Ui).
		require("-z1", "-z2", "-c1", "-c2").
		invalid(args) {
		return 2
	}

	this.topicsExcluded = make(map[string]struct{})
	for _, e := range strings.Split(this.excludes, ",") {
		this.topicsExcluded[e] = struct{}{}
	}

	log.SetOutput(os.Stdout)
	this.quit = make(chan struct{})
	limit := (1 << 20) * this.bandwidthLimit / 8
	this.bandwidthRateLimiter = ratelimiter.NewLeakyBucket(limit, time.Second)
	log.Printf("[%s]%s -> [%s]%s with bandwidth %sbps",
		this.zone1, this.cluster1,
		this.zone2, this.cluster2,
		gofmt.Comma(int64(limit*8)))
	signal.RegisterSignalsHandler(func(sig os.Signal) {
		log.Printf("received signal: %s", strings.ToUpper(sig.String()))
		log.Println("quiting...")

		this.once.Do(func() {
			close(this.quit)
		})
	}, syscall.SIGINT, syscall.SIGTERM)

	z1 := zk.NewZkZone(zk.DefaultConfig(this.zone1, ctx.ZoneZkAddrs(this.zone1)))
	z2 := zk.NewZkZone(zk.DefaultConfig(this.zone2, ctx.ZoneZkAddrs(this.zone2)))
	c1 := z1.NewCluster(this.cluster1)
	c2 := z2.NewCluster(this.cluster2)
	this.makeMirror(c1, c2)

	return
}
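A note on the -net flag above: the value is read as megabits per second, and (1 << 20) * bandwidthLimit / 8 converts it into the leaky bucket's refill rate in bytes per second. With the default -net 100 that works out to 100 × 1,048,576 / 8 = 13,107,200 bytes per second, which the log line multiplies back by 8 to report in bits.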
Example #13
func (this *stats) run() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	t1 := time.Now()

	var lastCalls int64
	for range ticker.C {
		if neatStat {
			log.Printf("c:%6d qps:%20s errs:%10s",
				Concurrency,
				gofmt.Comma(this.callOk-lastCalls),
				gofmt.Comma(this.callErrs))
		} else {
			log.Printf("%s c:%d sessions:%s calls:%s qps:%s errs:%s conns:%d go:%d",
				time.Since(t1),
				Concurrency,
				gofmt.Comma(int64(atomic.LoadInt32(&this.sessionN))),
				gofmt.Comma(atomic.LoadInt64(&this.callOk)),
				gofmt.Comma(this.callOk-lastCalls),
				gofmt.Comma(this.callErrs),
				atomic.LoadInt32(&this.concurrentN),
				runtime.NumGoroutine())
		}

		lastCalls = this.callOk
	}

}
Example #14
func (this *Consumers) displayGroupOffsets(zkcluster *zk.ZkCluster, group, topic string, echo bool) []consumerGroupOffset {
	offsetMap := zkcluster.ConsumerOffsetsOfGroup(group)
	sortedTopics := make([]string, 0, len(offsetMap))
	for t := range offsetMap {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	r := make([]consumerGroupOffset, 0)

	for _, t := range sortedTopics {
		if !patternMatched(t, this.topicPattern) || (topic != "" && t != topic) {
			continue
		}

		sortedPartitionIds := make([]string, 0, len(offsetMap[t]))
		for partitionId := range offsetMap[t] {
			sortedPartitionIds = append(sortedPartitionIds, partitionId)
		}
		sort.Strings(sortedPartitionIds)

		for _, partitionId := range sortedPartitionIds {
			r = append(r, consumerGroupOffset{
				topic:       t,
				partitionId: partitionId,
				offset:      gofmt.Comma(offsetMap[t][partitionId]),
			})

			if echo {
				this.Ui.Output(fmt.Sprintf("\t\t%s/%s Offset:%s",
					t, partitionId, gofmt.Comma(offsetMap[t][partitionId])))
			}

		}
	}

	return r

}
Example #15
func (this *EsOutput) showPeriodicalStats() {
	if !this.showProgress {
		return
	}

	var (
		globals = engine.Globals()
		total   = 0
	)

	globals.Printf("ES types: %d, within %s", this.counters.Len(), this.reportInterval)
	for _, key := range this.counters.SortedKeys() {
		val := this.counters.Get(key)
		if val > 0 {
			total += val
			globals.Printf("%-50s %12s", key, gofmt.Comma(int64(val)))

			this.counters.Set(key, 0)
		}
	}

	globals.Printf("%50s %12s", "Sum", gofmt.Comma(int64(total)))
}
Example #16
func RunSysStats(startedAt time.Time, interval time.Duration) {
	const nsInMs uint64 = 1000 * 1000

	ticker := time.NewTicker(interval)
	defer func() {
		ticker.Stop()
	}()

	var (
		ms           = new(runtime.MemStats)
		rusage       = &syscall.Rusage{}
		lastUserTime int64
		lastSysTime  int64
		userTime     int64
		sysTime      int64
		userCpuUtil  float64
		sysCpuUtil   float64
	)

	for range ticker.C {
		runtime.ReadMemStats(ms)

		syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
		// Timeval.Usec is microseconds; scale to nanoseconds so the deltas
		// below share units with the interval Duration
		userTime = rusage.Utime.Sec*1000000000 + int64(rusage.Utime.Usec)*1000
		sysTime = rusage.Stime.Sec*1000000000 + int64(rusage.Stime.Usec)*1000
		userCpuUtil = float64(userTime-lastUserTime) * 100 / float64(interval)
		sysCpuUtil = float64(sysTime-lastSysTime) * 100 / float64(interval)

		lastUserTime = userTime
		lastSysTime = sysTime

		log.Info("ver:%s, since:%s, go:%d, gc:%dms/%d=%d, heap:{%s, %s, %s, %s %s} cpu:{%3.2f%%us, %3.2f%%sy}",
			BuildId,
			time.Since(startedAt),
			runtime.NumGoroutine(),
			ms.PauseTotalNs/nsInMs,
			ms.NumGC,
			ms.PauseTotalNs/(nsInMs*uint64(ms.NumGC+1)), // +1 avoids division by zero before the first GC
			gofmt.ByteSize(ms.HeapSys),      // bytes it has asked the operating system for
			gofmt.ByteSize(ms.HeapAlloc),    // bytes currently allocated in the heap
			gofmt.ByteSize(ms.HeapIdle),     // bytes in the heap that are unused
			gofmt.ByteSize(ms.HeapReleased), // bytes returned to the operating system, 5m for scavenger
			gofmt.Comma(int64(ms.HeapObjects)),
			userCpuUtil,
			sysCpuUtil)

	}
}
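The CPU percentages fall out of consistent units: after the timeval conversion, userTime - lastUserTime is the user-mode CPU time consumed during the window in nanoseconds, and interval is a time.Duration, which is also a nanosecond count. For example, 2s of user CPU over a 10s interval yields 2e9 × 100 / 10e9 = 20%us.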
Example #17
func (this *Topics) printSummary(zkzone *zk.ZkZone, clusterPattern string) {
	lines := []string{"Zone|Cluster|Topic|Partitions|FlatMsg|Cum"}

	var totalFlat, totalCum int64
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		summaries := this.clusterSummary(zkcluster)
		sortutil.DescByField(summaries, "cum")
		for _, s := range summaries {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%d|%s|%s",
				s.zone, s.cluster, s.topic, s.partitions, gofmt.Comma(s.flat), gofmt.Comma(s.cum)))

			totalCum += s.cum
			totalFlat += s.flat
		}

	})

	this.Ui.Output(columnize.SimpleFormat(lines))
	this.Ui.Output(fmt.Sprintf("Flat:%s Cum:%s", gofmt.Comma(totalFlat), gofmt.Comma(totalCum)))
}
Example #18
func (this *Haproxy) fetchStats(statsUri string) {
	client := http.Client{Timeout: time.Second * 30}
	resp, err := client.Get(statsUri)
	swallow(err)
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		swallow(fmt.Errorf("fetch[%s] stats got status: %d", statsUri, resp.StatusCode))
	}

	var records map[string]map[string]int64
	reader := json.NewDecoder(resp.Body)
	err = reader.Decode(&records)
	swallow(err)

	u, err := url.Parse(statsUri)
	swallow(err)
	this.Ui.Info(u.Host)

	sortedSvcs := make([]string, 0)
	for svc := range records {
		sortedSvcs = append(sortedSvcs, svc)
	}
	sort.Strings(sortedSvcs)

	sortedCols := make([]string, 0)
	for k := range records["pub"] {
		sortedCols = append(sortedCols, k)
	}
	sort.Strings(sortedCols)

	lines := []string{strings.Join(append([]string{"svc"}, sortedCols...), "|")}
	for _, svc := range sortedSvcs {
		stats := records[svc]

		var vals = []string{svc}
		for _, k := range sortedCols {
			v := stats[k]

			vals = append(vals, gofmt.Comma(v))
		}

		lines = append(lines, strings.Join(vals, "|"))
	}

	this.Ui.Output(columnize.SimpleFormat(lines))
}
Example #19
func (this *engineStats) Runtime() map[string]interface{} {
	runtime.ReadMemStats(this.memStats)

	s := make(map[string]interface{})
	s["goroutines"] = runtime.NumGoroutine()
	s["memory.allocated"] = gofmt.ByteSize(this.memStats.Alloc).String()
	s["memory.mallocs"] = gofmt.ByteSize(this.memStats.Mallocs).String()
	s["memory.frees"] = gofmt.ByteSize(this.memStats.Frees).String()
	s["memory.last_gc"] = this.memStats.LastGC
	s["memory.gc.num"] = this.memStats.NumGC
	s["memory.gc.num_per_second"] = float64(this.memStats.NumGC) / time.
		Since(this.startedAt).Seconds()
	s["memory.gc.num_freq"] = fmt.Sprintf("%.1fsec/gc", time.
		Since(this.startedAt).Seconds()/float64(this.memStats.NumGC))
	s["memory.gc.total_pause"] = fmt.Sprintf("%dms",
		this.memStats.PauseTotalNs/uint64(time.Millisecond))
	s["memory.heap.alloc"] = gofmt.ByteSize(this.memStats.HeapAlloc).String()
	s["memory.heap.sys"] = gofmt.ByteSize(this.memStats.HeapSys).String()
	s["memory.heap.idle"] = gofmt.ByteSize(this.memStats.HeapIdle).String()
	s["memory.heap.released"] = gofmt.ByteSize(this.memStats.HeapReleased).String()
	s["memory.heap.objects"] = gofmt.Comma(int64(this.memStats.HeapObjects))
	s["memory.stack"] = gofmt.ByteSize(this.memStats.StackInuse).String()
	gcPausesMs := make([]string, 0, 20)
	for _, pauseNs := range this.memStats.PauseNs {
		if pauseNs == 0 {
			continue
		}

		pauseStr := fmt.Sprintf("%dms",
			pauseNs/uint64(time.Millisecond))
		if pauseStr == "0ms" {
			continue
		}

		gcPausesMs = append(gcPausesMs, pauseStr)
	}
	s["memory.gc.pauses"] = gcPausesMs
	s["mem"] = *this.memStats

	return s
}
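PauseNs in runtime.MemStats is a circular buffer holding the durations of the 256 most recent GC pauses, so the loop above skips the zero-valued unused slots, and additionally drops sub-millisecond pauses rather than emitting rows of "0ms".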
Example #20
func (this *Topology) displayZoneTopology(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())

	// {cluster: {topic: brokerHostInfo}}
	brokerInstances := make(map[string]map[string]*brokerHostInfo)

	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		if len(liveBrokers) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}
		if this.cluster != "" && this.cluster != cluster {
			return
		}

		brokerInstances[cluster] = make(map[string]*brokerHostInfo)

		for _, broker := range liveBrokers {
			if !patternMatched(broker.Host, this.hostPattern) {
				continue
			}

			if _, present := brokerInstances[cluster][broker.Host]; !present {
				brokerInstances[cluster][broker.Host] = newBrokerHostInfo()
			}
			brokerInstances[cluster][broker.Host].addPort(broker.Port, broker.Uptime())
		}

		// find how many partitions a broker is leading
		zkcluster := zkzone.NewCluster(cluster)
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}

		kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
		if err != nil {
			this.Ui.Error(color.Red("    %+v %s", brokerList, err.Error()))
			return
		}

		topics, err := kfk.Topics()
		swallow(err)
		for _, topic := range topics {
			partitions, err := kfk.WritablePartitions(topic)
			swallow(err)
			for _, partitionID := range partitions {
				leader, err := kfk.Leader(topic, partitionID)
				swallow(err)
				host, _, err := net.SplitHostPort(leader.Addr())
				swallow(err)
				if !patternMatched(host, this.hostPattern) {
					continue
				}

				latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}
				oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}

				brokerInstances[cluster][host].topicMsgs[topic] += (latestOffset - oldestOffset)
				brokerInstances[cluster][host].addTopicPartition(topic, partitionID)
			}
		}
	})

	hosts := make(map[string]struct{})
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		for host := range brokerInstances[zkcluster.Name()] {
			hosts[host] = struct{}{}
		}
	})
	sortedHosts := make([]string, 0)
	for host := range hosts {
		sortedHosts = append(sortedHosts, host)
	}
	sort.Strings(sortedHosts)

	// sort cluster names for stable output
	sortedClusters := make([]string, 0, len(brokerInstances))
	for c := range brokerInstances {
		sortedClusters = append(sortedClusters, c)
	}
	sort.Strings(sortedClusters)

	portN := 0
	hostN := 0
	topicN := 0
	partitionN := 0
	for _, host := range sortedHosts {
		tn := 0
		pn := 0
		mn := int64(0)
		ports := make([]int, 0)
		for _, cluster := range sortedClusters {
			if _, present := brokerInstances[cluster][host]; !present {
				continue
			}

			tn += len(brokerInstances[cluster][host].topicPartitions)
			pn += brokerInstances[cluster][host].leadingPartitions()
			mn += brokerInstances[cluster][host].totalMsgsInStock()
			ports = append(ports, brokerInstances[cluster][host].tcpPorts...)
		}

		portN += len(ports)
		topicN += tn
		partitionN += pn
		hostN += 1

		this.Ui.Output(fmt.Sprintf("  %s leading: %2dT %3dP %15sM ports %2d:%+v",
			color.Green("%15s", host),
			tn,
			pn,
			gofmt.Comma(mn),
			len(ports),
			ports))

		if this.verbose {
			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				for _, tcpPort := range brokerInstances[cluster][host].tcpPorts {
					this.Ui.Output(fmt.Sprintf("%40d %s", tcpPort,
						gofmt.PrettySince(brokerInstances[cluster][host].uptimes[tcpPort])))
				}
			}

			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				this.Ui.Output(color.Magenta("%30s", cluster))
				for topic, partitions := range brokerInstances[cluster][host].topicPartitions {
					this.Ui.Output(fmt.Sprintf("%40s: %15sM P%2d %+v",
						topic,
						gofmt.Comma(brokerInstances[cluster][host].topicMsgs[topic]),
						len(partitions), partitions))
				}

			}

		}
	}

	this.Ui.Output(fmt.Sprintf("%17s host:%d, topic:%d, partition:%d, instance:%d",
		"-TOTAL-",
		hostN, topicN, partitionN, portN))
}
Example #21
func (this *Redis) render() {
	if !this.batchMode {
		termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	}

	this.mu.Lock()
	defer this.mu.Unlock()

	if this.topOrderAsc {
		sortutil.AscByField(this.topInfos, this.topOrderCols[this.topOrderColIdx])
	} else {
		sortutil.DescByField(this.topInfos, this.topOrderCols[this.topOrderColIdx])
	}
	sortCols := make([]string, len(this.topOrderCols))
	copy(sortCols, this.topOrderCols)
	for i, col := range sortCols {
		if col == this.selectedCol() {
			if this.topOrderAsc {
				sortCols[i] += " >"
			} else {
				sortCols[i] += " <"
			}
		}
	}

	var (
		lines = []string{fmt.Sprintf("Host|Port|%s", strings.Join(sortCols, "|"))}

		sumDbsize, sumConns, sumOps, sumMem, sumRx, sumTx, sumMaxMem int64
	)
	for i := 0; i < len(this.topInfos); i++ {
		info := this.topInfos[i]

		sumDbsize += info.dbsize
		sumConns += info.conns
		sumOps += info.ops
		sumMem += info.mem
		sumMaxMem += info.maxmem
		sumRx += info.rx * 1024 / 8
		sumTx += info.tx * 1024 / 8

		if info.ops >= this.beep {
			this.warnPorts[strconv.Itoa(info.port)] = struct{}{}
		}

		this.maxDbSize = max(this.maxDbSize, info.dbsize)
		this.maxConns = max(this.maxConns, info.conns)
		this.maxOps = max(this.maxOps, info.ops)
		this.maxMem = max(this.maxMem, info.mem)
		this.maxMaxMem = max(this.maxMaxMem, info.maxmem)
		this.maxRx = max(this.maxRx, info.rx*1024/8)
		this.maxTx = max(this.maxTx, info.tx*1024/8)
		if info.memp > this.maxMemp {
			this.maxMemp = info.memp
		}
		if info.trp > this.maxTrp {
			this.maxTrp = info.trp
		}

		if i >= min(this.rows, len(this.topInfos)) {
			continue
		}

		l := fmt.Sprintf("%s|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
			info.host, info.port,
			gofmt.Comma(info.dbsize), gofmt.Comma(info.conns), gofmt.Comma(info.ops),
			gofmt.ByteSize(info.mem), gofmt.ByteSize(info.maxmem),
			100.*info.memp,
			gofmt.ByteSize(info.rx*1024/8), gofmt.ByteSize(info.tx*1024/8),
			info.trp)
		if this.beep > 0 {
			var val int64
			switch this.selectedCol() {
			case "conns":
				val = info.conns
			case "rx":
				val = info.rx
			case "tx":
				val = info.tx
			case "dbsize":
				val = info.dbsize
			case "ops":
				val = info.ops
			case "mem":
				val = info.mem
			case "maxm":
				val = info.maxmem

			}

			if val > this.beep {
				//l += "\a"
			}
		}
		lines = append(lines, l)
	}
	lines = append(lines, fmt.Sprintf("-MAX-|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
		len(this.topInfos),
		gofmt.Comma(this.maxDbSize), gofmt.Comma(this.maxConns), gofmt.Comma(this.maxOps),
		gofmt.ByteSize(this.maxMem), gofmt.ByteSize(this.maxMaxMem),
		100.*this.maxMemp,
		gofmt.ByteSize(this.maxRx), gofmt.ByteSize(this.maxTx),
		this.maxTrp))
	lines = append(lines, fmt.Sprintf("-TOTAL-|%d|%s|%s|%s|%s|%s|%6.1f|%s|%s|%8.1f",
		len(this.topInfos),
		gofmt.Comma(sumDbsize), gofmt.Comma(sumConns), gofmt.Comma(sumOps),
		gofmt.ByteSize(sumMem), gofmt.ByteSize(sumMaxMem),
		100.*float64(sumMem)/float64(sumMaxMem),
		gofmt.ByteSize(sumRx), gofmt.ByteSize(sumTx),
		float64(sumTx)/float64(sumRx)))

	for row, line := range lines {
		if row == 0 {
			// header
			this.drawRow(line, row, termbox.ColorDefault, termbox.ColorBlue)
		} else if row == len(lines)-2 {
			// max line
			this.drawRow(line, row, termbox.ColorMagenta, termbox.ColorDefault)
		} else if row == len(lines)-1 {
			// total line
			this.drawRow(line, row, termbox.ColorYellow, termbox.ColorDefault)
		} else {
			tuples := strings.Split(line, "|")
			if _, present := this.selectedPorts[tuples[1]]; present {
				this.drawRow(line, row, termbox.ColorBlack, termbox.ColorCyan)
			} else if _, present := this.warnPorts[tuples[1]]; !present {
				this.drawRow(line, row, termbox.ColorDefault, termbox.ColorDefault)
			} else {
				this.drawRow(line, row, termbox.ColorDefault, termbox.ColorRed)
			}
		}
	}

	if !this.batchMode {
		termbox.Flush()
	}
}
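The recurring rx*1024/8 and tx*1024/8 scaling suggests the rx/tx samples arrive in kilobits per second: multiplying by 1024 and dividing by 8 converts them to bytes per second before ByteSize renders them for display.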
Example #22
func (this *Lags) printConsumersLag(zkcluster *zk.ZkCluster) {
	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		lines := make([]string, 0, 100)

		sortedTopicAndPartitionIds := make([]string, 0)
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}

			var (
				lagOutput string
				symbol    string
			)
			if consumer.Lag > int64(this.lagThreshold) {
				lagOutput = color.Red("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Yellow("⚠")
				} else {
					symbol = color.Yellow("◎")
				}
			} else {
				lagOutput = color.Blue("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Green("◉")
				} else {
					symbol = color.Yellow("◎")
				}
			}

			if consumer.Online {
				if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
					continue
				}

				var (
					host   string
					uptime string
				)
				if consumer.ConsumerZnode == nil {
					host = "unrecognized"
					uptime = "-"
				} else {
					host = color.Green("%s", consumer.ConsumerZnode.Host())
					if time.Since(consumer.ConsumerZnode.Uptime()) < time.Hour {
						uptime = color.Magenta(gofmt.PrettySince(consumer.ConsumerZnode.Uptime()))
					} else {
						uptime = gofmt.PrettySince(consumer.ConsumerZnode.Uptime())
					}
				}

				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-15s %s %-10s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time()),
					host, uptime))
			} else if !this.onlineOnly {
				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-12s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time())))
			}
		}

		if len(lines) > 0 {
			this.Ui.Output(strings.Repeat(" ", 4) + group)
			for _, l := range lines {
				this.Ui.Output(l)
			}
		}
	}
}
Example #23
func (this *Lags) printConsumersLagTable(zkcluster *zk.ZkCluster) {
	lines := make([]string, 0)
	header := "ConsumerGroup|Topic/Partition|Produced|Consumed|Lag|Committed|Uptime"
	lines = append(lines, header)

	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		if !patternMatched(group, this.groupPattern) {
			continue
		}

		sortedTopicAndPartitionIds := make([]string, 0, len(consumersByGroup[group]))
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}
			if !consumer.Online {
				continue
			}
			if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
				continue
			}
			if consumer.ConsumerZnode == nil {
				this.Ui.Warn(fmt.Sprintf("%+v has no znode", consumer))
				continue
			}

			lines = append(lines,
				fmt.Sprintf("%s|%s/%s|%s|%s|%s|%s|%s",
					group,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					gofmt.Comma(consumer.Lag),
					gofmt.PrettySince(consumer.Mtime.Time()),
					gofmt.PrettySince(consumer.ConsumerZnode.Uptime())))
		}
	}

	if len(lines) > 1 {
		this.Ui.Info(fmt.Sprintf("%s ▾", zkcluster.Name()))
		this.Ui.Output(columnize.SimpleFormat(lines))
	}
}
Example #24
func (this *Topics) Run(args []string) (exitCode int) {
	var (
		zone                    string
		cluster                 string
		addTopic                string
		delTopic                string
		killTopic, restoreTopic string
		replicas                int
		partitions              int
		retentionInMinute       int
		resetConf               bool
		debug                   bool
		summaryMode             bool
		configged               bool
	)
	cmdFlags := flag.NewFlagSet("brokers", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&this.topicPattern, "t", "", "")
	cmdFlags.StringVar(&cluster, "c", "", "")
	cmdFlags.BoolVar(&this.verbose, "l", false, "")
	cmdFlags.BoolVar(&this.ipInNumber, "n", false, "")
	cmdFlags.StringVar(&addTopic, "add", "", "")
	cmdFlags.BoolVar(&summaryMode, "sum", false, "")
	cmdFlags.StringVar(&killTopic, "kill", "", "")
	cmdFlags.StringVar(&restoreTopic, "restore", "", "")
	cmdFlags.BoolVar(&this.plainMode, "plain", false, "")
	cmdFlags.StringVar(&delTopic, "del", "", "")
	cmdFlags.IntVar(&partitions, "partitions", 1, "")
	cmdFlags.DurationVar(&this.since, "since", 0, "")
	cmdFlags.BoolVar(&configged, "cf", false, "")
	cmdFlags.BoolVar(&debug, "debug", false, "")
	cmdFlags.BoolVar(&resetConf, "cfreset", false, "")
	cmdFlags.Int64Var(&this.count, "count", 0, "")
	cmdFlags.IntVar(&retentionInMinute, "retention", -1, "")
	cmdFlags.IntVar(&replicas, "replicas", 2, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if validateArgs(this, this.Ui).
		on("-add", "-c").
		on("-del", "-c").
		on("-retention", "-c", "-t").
		on("-cfreset", "-c", "-t").
		requireAdminRights("-add", "-del", "-retention").
		invalid(args) {
		return 2
	}

	if this.count > 0 {
		this.verbose = true
	}

	if debug {
		sarama.Logger = log.New(os.Stderr, color.Magenta("[sarama]"), log.LstdFlags)
	}

	if addTopic != "" {
		zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
		zkcluster := zkzone.NewCluster(cluster)
		swallow(this.addTopic(zkcluster, addTopic, replicas, partitions))

		return
	} else if delTopic != "" {
		zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
		zkcluster := zkzone.NewCluster(cluster)
		swallow(this.delTopic(zkcluster, delTopic))

		return
	}

	ensureZoneValid(zone)

	zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
	if retentionInMinute > 0 {
		zkcluster := zkzone.NewCluster(cluster)
		this.configTopic(zkcluster, this.topicPattern, retentionInMinute)
		return
	}

	if resetConf {
		zkcluster := zkzone.NewCluster(cluster)
		this.resetTopicConfig(zkcluster, this.topicPattern)
		configged = true // after reset, display most recent znode info
	}

	if configged {
		// output header
		this.Ui.Output(fmt.Sprintf("%25s %-40s %15s", "cluster", "topic", "mtime"))
		this.Ui.Output(fmt.Sprintf("%25s %40s %15s",
			strings.Repeat("-", 25), strings.Repeat("-", 40), strings.Repeat("-", 15)))

		displayTopicConfigs := func(zkcluster *zk.ZkCluster) {
			// sort by topic name
			configs := zkcluster.ConfiggedTopics()
			sortedTopics := make([]string, 0, len(configs))
			for topic := range configs {
				sortedTopics = append(sortedTopics, topic)
			}
			sort.Strings(sortedTopics)

			for _, topic := range sortedTopics {
				if !patternMatched(topic, this.topicPattern) {
					continue
				}

				configInfo := configs[topic]
				this.Ui.Output(fmt.Sprintf("%25s %40s %15s %s",
					zkcluster.Name(),
					topic,
					gofmt.PrettySince(configInfo.Mtime),
					configInfo.Config))
			}

		}

		if cluster == "" {
			zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
				displayTopicConfigs(zkcluster)
			})
		} else {
			zkcluster := zkzone.NewCluster(cluster)
			displayTopicConfigs(zkcluster)
		}

		return
	}

	if summaryMode {
		this.printSummary(zkzone, cluster)
		return
	}

	if !this.verbose {
		// output header
		this.Ui.Output(fmt.Sprintf("%30s %-50s", "cluster", "topic"))
		this.Ui.Output(fmt.Sprintf("%30s %50s",
			strings.Repeat("-", 30), strings.Repeat("-", 50)))
	}

	if cluster != "" {
		zkcluster := zkzone.NewCluster(cluster)
		this.displayTopicsOfCluster(zkcluster)

		this.Ui.Output(fmt.Sprintf("%25s %d", "-TOTAL Topics-", this.topicN))
		this.Ui.Output(fmt.Sprintf("%25s %d", "-TOTAL Partitions-", this.partitionN))
		if this.verbose {
			if this.plainMode {
				fmt.Printf("%25s %s\n", "-FLAT Messages-", gofmt.Comma(this.totalMsgs))
				fmt.Printf("%25s %s\n", "-CUM Messages-", gofmt.Comma(this.totalOffsets))
			} else {
				this.Ui.Output(fmt.Sprintf("%25s %s", "-FLAT Messages-", gofmt.Comma(this.totalMsgs)))
				this.Ui.Output(fmt.Sprintf("%25s %s", "-CUM Messages-", gofmt.Comma(this.totalOffsets)))
			}

		}
		return
	}

	// all clusters
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		this.displayTopicsOfCluster(zkcluster)
	})
	this.Ui.Output(fmt.Sprintf("%25s %d", "-TOTAL Topics-", this.topicN))
	this.Ui.Output(fmt.Sprintf("%25s %d", "-TOTAL Partitions-", this.partitionN))
	if this.verbose {
		if this.plainMode {
			fmt.Printf("%25s %s\n", "-FLAT Messages-", gofmt.Comma(this.totalMsgs))
			fmt.Printf("%25s %s\n", "-CUM Messages-", gofmt.Comma(this.totalOffsets))
		} else {
			this.Ui.Output(fmt.Sprintf("%25s %s", "-FLAT Messages-", gofmt.Comma(this.totalMsgs)))
			this.Ui.Output(fmt.Sprintf("%25s %s", "-CUM Messages-", gofmt.Comma(this.totalOffsets)))
		}

	}

	return
}
Example #25
func (this *Mirror) pump(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer,
	stop, stopped chan struct{}) {
	defer func() {
		log.Trace("closing sub, commit offsets...")
		sub.Close()

		stopped <- struct{}{} // notify others I'm done
	}()

	active := true
	backoff := time.Second * 2
	idle := time.Second * 10
	for {
		select {
		case <-this.quit:
			log.Trace("got signal quit")
			return

		case <-stop:
			// yes sir!
			log.Trace("got signal stop")
			return

		case <-time.After(idle):
			active = false
			log.Info("idle 10s waiting for new message")

		case msg, ok := <-sub.Messages():
			if !ok {
				log.Warn("sub encounters end of message stream")
				return
			}

			if !active || this.Debug {
				log.Info("<-[#%d] T:%s M:%s", this.transferN, msg.Topic, string(msg.Value))
			}
			active = true

			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			if this.AutoCommit {
				sub.CommitUpto(msg)
			}

			// rate limit, never overflood the limited bandwidth between IDCs
			// FIXME when compressed, the bandwidth calculation is wrong
			bytesN := len(msg.Topic) + len(msg.Key) + len(msg.Value) + 20 // payload overhead
			if this.bandwidthRateLimiter != nil && !this.bandwidthRateLimiter.Pour(bytesN) {
				log.Warn("%s -> bandwidth reached, backoff %s", gofmt.ByteSize(this.transferBytes), backoff)
				time.Sleep(backoff)
			}

			this.transferBytes += int64(bytesN)
			this.transferN++
			if this.transferN%this.ProgressStep == 0 {
				log.Trace("%s %s %s", gofmt.Comma(this.transferN), gofmt.ByteSize(this.transferBytes), msg.Topic)
			}

		case err := <-sub.Errors():
			log.Error("quitting pump %v", err)
			return
		}
	}
}
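Compared with the pump in Example #10, this version signals completion on a dedicated stopped channel instead of echoing back on stop, so the deferred notification cannot collide with an incoming stop request on the same channel; it also detects a closed Messages() stream and exits instead of blocking forever.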
Example #26
func (this *Peek) Run(args []string) (exitCode int) {
	var (
		cluster      string
		zone         string
		topicPattern string
		partitionId  int
		wait         time.Duration
		silence      bool
	)
	cmdFlags := flag.NewFlagSet("peek", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&cluster, "c", "", "")
	cmdFlags.StringVar(&topicPattern, "t", "", "")
	cmdFlags.IntVar(&partitionId, "p", 0, "")
	cmdFlags.BoolVar(&this.colorize, "color", true, "")
	cmdFlags.Int64Var(&this.lastN, "last", -1, "")
	cmdFlags.BoolVar(&this.pretty, "pretty", false, "")
	cmdFlags.IntVar(&this.limit, "n", -1, "")
	cmdFlags.StringVar(&this.column, "col", "", "") // TODO support multiple columns
	cmdFlags.BoolVar(&this.beep, "beep", false, "")
	cmdFlags.Int64Var(&this.offset, "offset", sarama.OffsetNewest, "")
	cmdFlags.BoolVar(&silence, "s", false, "")
	cmdFlags.DurationVar(&wait, "d", time.Hour, "")
	cmdFlags.BoolVar(&this.bodyOnly, "body", false, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.pretty {
		this.bodyOnly = true
	}

	this.quit = make(chan struct{})

	// stats is read again inside the consume loop below, so it must
	// outlive this if-block
	stats := newPeekStats()
	if silence {
		go stats.start()
	}

	zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
	msgChan := make(chan *sarama.ConsumerMessage, 20000) // msg aggregator channel
	if cluster == "" {
		zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
			this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
		})
	} else {
		zkcluster := zkzone.NewCluster(cluster)
		this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
	}

	signal.RegisterHandler(func(sig os.Signal) {
		log.Printf("received signal: %s", strings.ToUpper(sig.String()))
		log.Println("quiting...")

		this.once.Do(func() {
			close(this.quit)
		})
	}, syscall.SIGINT, syscall.SIGTERM)

	var (
		startAt = time.Now()
		msg     *sarama.ConsumerMessage
		total   int
		bytesN  int64
	)

	var (
		j          map[string]interface{}
		prettyJSON bytes.Buffer
	)

LOOP:
	for {
		if time.Since(startAt) >= wait {
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return
		}

		select {
		case <-this.quit:
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return

		case <-time.After(time.Second):
			continue

		case msg = <-msgChan:
			if silence {
				stats.MsgCountPerSecond.Mark(1)
				stats.MsgBytesPerSecond.Mark(int64(len(msg.Value)))
			} else {
				var outmsg string
				if this.column != "" {
					if err := json.Unmarshal(msg.Value, &j); err != nil {
						this.Ui.Error(err.Error())
					} else {
						var colVal string
						switch t := j[this.column].(type) {
						case string:
							colVal = t
						case float64:
							colVal = fmt.Sprintf("%.0f", t)
						case int:
							colVal = fmt.Sprintf("%d", t)
						}

						if this.bodyOnly {
							if this.pretty {
								if err = json.Indent(&prettyJSON, []byte(colVal), "", "    "); err != nil {
									fmt.Println(err.Error())
								} else {
									outmsg = string(prettyJSON.Bytes())
								}
							} else {
								outmsg = colVal
							}
						} else if this.colorize {
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								color.Green(msg.Topic), msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						} else {
							// plain text: colorized output would carry invisible escape chars
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								msg.Topic, msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						}
					}

				} else {
					if this.bodyOnly {
						if this.pretty {
							prettyJSON.Reset() // json.Indent appends: drop output left from the previous message
							json.Indent(&prettyJSON, msg.Value, "", "    ")
							outmsg = prettyJSON.String()
						} else {
							outmsg = string(msg.Value)
						}
					} else if this.colorize {
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							color.Green(msg.Topic), msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					} else {
						// plain text: colorized output would carry invisible escape chars
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							msg.Topic, msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					}
				}

				if outmsg != "" {
					if this.beep {
						outmsg += "\a"
					}

					this.Ui.Output(outmsg)
				}
			}

			total++
			bytesN += int64(len(msg.Value))

			if this.limit > 0 && total >= this.limit {
				break LOOP
			}
			if this.lastN > 0 && total >= int(this.lastN) {
				break LOOP
			}

		}
	}

	return
}
Example #27
func (e *EngineConfig) ServeForever() {
	var (
		outputsWg = new(sync.WaitGroup)
		filtersWg = new(sync.WaitGroup)
		inputsWg  = new(sync.WaitGroup)
		globals   = Globals()
		err       error
	)

	// setup signal handler first to avoid race condition
	// if Input terminates very soon, global.Shutdown will
	// not be able to trap it
	globals.sigChan = make(chan os.Signal, 1) // signal.Notify requires a buffered channel
	signal.Notify(globals.sigChan, syscall.SIGINT, syscall.SIGHUP,
		syscall.SIGUSR2, syscall.SIGUSR1)

	e.launchHttpServ()

	if globals.Verbose {
		globals.Println("Launching Output(s)...")
	}
	for _, runner := range e.OutputRunners {
		outputsWg.Add(1)
		if err = runner.start(e, outputsWg); err != nil {
			panic(err)
		}
	}

	if globals.Verbose {
		globals.Println("Launching Filter(s)...")
	}
	for _, runner := range e.FilterRunners {
		filtersWg.Add(1)
		if err = runner.start(e, filtersWg); err != nil {
			panic(err)
		}
	}

	// setup the diagnostic trackers
	inputPackTracker := NewDiagnosticTracker("inputPackTracker")
	e.diagnosticTrackers[inputPackTracker.PoolName] = inputPackTracker
	filterPackTracker := NewDiagnosticTracker("filterPackTracker")
	e.diagnosticTrackers[filterPackTracker.PoolName] = filterPackTracker

	if globals.Verbose {
		globals.Printf("Initializing PipelinePack pools with size=%d\n",
			globals.RecyclePoolSize)
	}
	for i := 0; i < globals.RecyclePoolSize; i++ {
		inputPack := NewPipelinePack(e.inputRecycleChan)
		inputPackTracker.AddPack(inputPack)
		e.inputRecycleChan <- inputPack

		filterPack := NewPipelinePack(e.filterRecycleChan)
		filterPackTracker.AddPack(filterPack)
		e.filterRecycleChan <- filterPack
	}

	go inputPackTracker.Run(e.Int("diagnostic_interval", 20))
	go filterPackTracker.Run(e.Int("diagnostic_interval", 20))

	// check if we have enough recycle pool reservation
	go func() {
		t := time.NewTicker(time.Second * time.Duration(globals.TickerLength))
		defer t.Stop()

		var inputPoolSize, filterPoolSize int

		for range t.C {
			inputPoolSize = len(e.inputRecycleChan)
			filterPoolSize = len(e.filterRecycleChan)
			if globals.Verbose || inputPoolSize == 0 || filterPoolSize == 0 {
				globals.Printf("Recycle poolSize: [input]%d [filter]%d",
					inputPoolSize, filterPoolSize)
			}
		}
	}()

	go e.router.Start()

	for _, project := range e.projects {
		project.Start()
	}

	if globals.Verbose {
		globals.Println("Launching Input(s)...")
	}
	for _, runner := range e.InputRunners {
		inputsWg.Add(1)
		if err = runner.start(e, inputsWg); err != nil {
			inputsWg.Done()
			panic(err)
		}
	}

	globals.Println("Engine mainloop, waiting for signals...")
	go runShutdownWatchdog(e)

	for !globals.Stopping {
		select {
		case sig := <-globals.sigChan:
			globals.Printf("Got signal %s\n", sig.String())
			switch sig {
			case syscall.SIGHUP:
				globals.Println("Reloading...")
				observer.Publish(RELOAD, nil)

			case syscall.SIGINT:
				globals.Println("Engine shutdown...")
				globals.Stopping = true

			case syscall.SIGUSR1:
				observer.Publish(SIGUSR1, nil)

			case syscall.SIGUSR2:
				observer.Publish(SIGUSR2, nil)
			}
		}
	}

	// cleanup after shutdown
	inputPackTracker.Stop()
	filterPackTracker.Stop()

	e.Lock()
	for _, runner := range e.InputRunners {
		if runner == nil {
			// this Input plugin already exit
			continue
		}

		if globals.Verbose {
			globals.Printf("Stop message sent to '%s'", runner.Name())
		}

		runner.Input().Stop()
	}
	e.Unlock()
	inputsWg.Wait() // wait for all inputs done
	if globals.Verbose {
		globals.Println("All Inputs terminated")
	}

	// ok, now we are sure no more inputs, but in route.inChan there
	// still may be filter injected packs and output not consumed packs
	// we must wait for all the packs to be consumed before shutdown

	for _, runner := range e.FilterRunners {
		if globals.Verbose {
			globals.Printf("Stop message sent to '%s'", runner.Name())
		}

		e.router.removeFilterMatcher <- runner.Matcher()
	}
	filtersWg.Wait()
	if globals.Verbose {
		globals.Println("All Filters terminated")
	}

	for _, runner := range e.OutputRunners {
		if globals.Verbose {
			globals.Printf("Stop message sent to '%s'", runner.Name())
		}

		e.router.removeOutputMatcher <- runner.Matcher()
	}
	outputsWg.Wait()
	if globals.Verbose {
		globals.Println("All Outputs terminated")
	}

	//close(e.router.inChan)

	e.stopHttpServ()

	for _, project := range e.projects {
		project.Stop()
	}

	globals.Printf("Shutdown with input:%s, dispatched:%s",
		gofmt.Comma(e.router.stats.TotalInputMsgN),
		gofmt.Comma(e.router.stats.TotalProcessedMsgN))
}
Example #28
func (this *Topics) displayTopicsOfCluster(zkcluster *zk.ZkCluster) {
	echoBuffer := func(lines []string) {
		for _, l := range lines {
			this.Ui.Output(l)
		}
	}

	linesInTopicMode := make([]string, 0)
	if this.verbose {
		linesInTopicMode = this.echoOrBuffer(zkcluster.Name(), linesInTopicMode)
	}

	// get all alive brokers within this cluster
	brokers := zkcluster.Brokers()
	if len(brokers) == 0 {
		linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s", " ",
			color.Red("%s empty brokers", zkcluster.Name())), linesInTopicMode)
		echoBuffer(linesInTopicMode)
		return
	}

	if this.verbose {
		sortedBrokerIds := make([]string, 0, len(brokers))
		for brokerId := range brokers {
			sortedBrokerIds = append(sortedBrokerIds, brokerId)
		}
		sort.Strings(sortedBrokerIds)
		for _, brokerId := range sortedBrokerIds {
			if this.ipInNumber {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId]), linesInTopicMode)
			} else {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId].NamedString()), linesInTopicMode)
			}

		}
	}

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(color.Yellow("%5s%+v %s", " ",
				zkcluster.BrokerList(), err.Error()), linesInTopicMode)
		}

		return
	}
	defer kfk.Close()

	topics, err := kfk.Topics()
	swallow(err)
	if len(topics) == 0 {
		if this.topicPattern == "" && this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%5s%s", " ",
				color.Magenta("no topics")), linesInTopicMode)
			echoBuffer(linesInTopicMode)
		}

		return
	}

	sortedTopics := make([]string, 0, len(topics))
	for _, t := range topics {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	topicsCtime := zkcluster.TopicsCtime()
	hasTopicMatched := false
	for _, topic := range sortedTopics {
		if !patternMatched(topic, this.topicPattern) {
			continue
		}

		if this.since > 0 && time.Since(topicsCtime[topic]) > this.since {
			continue
		}

		this.topicN++

		hasTopicMatched = true
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(strings.Repeat(" ", 4)+color.Cyan(topic), linesInTopicMode)
		}

		// get partitions and check if some dead
		alivePartitions, err := kfk.WritablePartitions(topic)
		swallow(err)
		partitions, err := kfk.Partitions(topic)
		swallow(err)
		if len(alivePartitions) != len(partitions) {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %s P: %s/%+v",
				zkcluster.Name(), color.Cyan("%-50s", topic), color.Red("partial dead"), color.Green("%+v", alivePartitions), partitions), linesInTopicMode)
		}

		replicas, err := kfk.Replicas(topic, partitions[0])
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitions[0], err))
		}

		this.partitionN += len(partitions)
		if !this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %3dP %dR %s",
				zkcluster.Name(),
				color.Cyan("%-50s", topic),
				len(partitions), len(replicas),
				gofmt.PrettySince(topicsCtime[topic])), linesInTopicMode)
			continue
		}

		for _, partitionID := range alivePartitions {
			leader, err := kfk.Leader(topic, partitionID)
			swallow(err)

			replicas, err := kfk.Replicas(topic, partitionID)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitionID, err))
			}

			isr, isrMtime, partitionCtime := zkcluster.Isr(topic, partitionID)
			isrMtimeSince := gofmt.PrettySince(isrMtime)
			if time.Since(isrMtime).Hours() < 24 {
				// ever out of sync last 24h
				isrMtimeSince = color.Magenta(isrMtimeSince)
			}

			underReplicated := false
			if len(isr) != len(replicas) {
				underReplicated = true
			}

			latestOffset, err := kfk.GetOffset(topic, partitionID,
				sarama.OffsetNewest)
			swallow(err)

			oldestOffset, err := kfk.GetOffset(topic, partitionID,
				sarama.OffsetOldest)
			swallow(err)

			if this.count > 0 && (latestOffset-oldestOffset) < this.count {
				continue
			}

			this.totalMsgs += latestOffset - oldestOffset
			this.totalOffsets += latestOffset
			if !underReplicated {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%+v Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, isr,
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			} else {
				// use red for alert
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%s Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, color.Red("%+v", isr),
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			}
		}
	}

	if this.topicPattern != "" {
		if hasTopicMatched {
			echoBuffer(linesInTopicMode)
		}

	} else {
		echoBuffer(linesInTopicMode)
	}
}
Example #29
func (this *Lags) Run(args []string) (exitCode int) {
	const secondsInMinute = 60
	var (
		cluster string
		zone    string
	)
	cmdFlags := flag.NewFlagSet("lags", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&cluster, "c", "", "")
	cmdFlags.BoolVar(&this.onlineOnly, "l", false, "")
	cmdFlags.BoolVar(&this.problematicMode, "p", false, "")
	cmdFlags.StringVar(&this.groupPattern, "g", "", "")
	cmdFlags.StringVar(&this.topicPattern, "t", "", "")
	cmdFlags.BoolVar(&this.tableFmt, "table", false, "")
	cmdFlags.BoolVar(&this.watchMode, "w", false, "")
	cmdFlags.IntVar(&this.lagThreshold, "lag", 5000, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.watchMode {
		refreshScreen()
	}

	if this.problematicMode {
		this.onlineOnly = true
	}

	zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
	bar := progress.New(secondsInMinute)
	if cluster == "" {
		for {
			zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
				if this.tableFmt {
					this.printConsumersLagTable(zkcluster)
				} else {
					this.printConsumersLag(zkcluster)
				}
			})

			if this.watchMode {
				for i := 1; i <= secondsInMinute; i++ {
					bar.ShowProgress(i)
					time.Sleep(time.Second)
				}
			} else {
				break
			}

			printSwallowedErrors(this.Ui, zkzone)
		}

		this.Ui.Output(fmt.Sprintf("Lag totals: %s", gofmt.Comma(this.lagTotal)))

		return
	}

	// display a single cluster
	for {
		zkcluster := zkzone.NewCluster(cluster) // panic if invalid cluster
		if this.tableFmt {
			this.printConsumersLagTable(zkcluster)
		} else {
			this.printConsumersLag(zkcluster)
		}

		if this.watchMode {
			for i := 1; i <= secondsInMinute; i++ {
				bar.ShowProgress(i)
				time.Sleep(time.Second)
			}
			//refreshScreen()
		} else {
			break
		}

		printSwallowedErrors(this.Ui, zkzone)
	}

	this.Ui.Output(fmt.Sprintf("Lag totals: %s", gofmt.Comma(this.lagTotal)))

	return
}
Example #30
func (this *Mirror) runMirror(c1, c2 *zk.ZkCluster, limit int64) {
	this.startedAt = time.Now()

	log.Info("start [%s/%s] -> [%s/%s] with bandwidth %sbps",
		c1.ZkZone().Name(), c1.Name(),
		c2.ZkZone().Name(), c2.Name(),
		gofmt.Comma(limit*8))

	pub, err := this.makePub(c2)
	if err != nil {
		panic(err)
	}
	log.Trace("pub[%s/%s] created", c2.ZkZone().Name(), c2.Name())

	go func(pub sarama.AsyncProducer, c *zk.ZkCluster) {
		for {
			select {
			case <-this.quit:
				return

			case err := <-pub.Errors():
				// messages will only be returned here after all retry attempts are exhausted.
				//
				// e.g.
				// Failed to produce message to topic xx: write tcp src->kfk: i/o timeout
				// kafka: broker not connected
				log.Error("pub[%s/%s] %v", c.ZkZone().Name(), c.Name(), err)
			}
		}
	}(pub, c2)

	group := this.groupName(c1, c2)
	ever := true
	round := 0
	for ever {
		round++

		topics, topicsChanges, err := c1.WatchTopics()
		if err != nil {
			log.Error("#%d [%s/%s]watch topics: %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		topics = this.realTopics(topics)
		sub, err := this.makeSub(c1, group, topics)
		if err != nil {
			log.Error("#%d [%s/%s] %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		log.Info("#%d starting pump [%s/%s] -> [%s/%s] %d topics with group %s for %+v", round,
			c1.ZkZone().Name(), c1.Name(),
			c2.ZkZone().Name(), c2.Name(), len(topics), group, topics)

		pumpStopper := make(chan struct{})
		pumpStopped := make(chan struct{})
		go this.pump(sub, pub, pumpStopper, pumpStopped)

		select {
		case <-topicsChanges:
			// TODO log the diff the topics
			log.Warn("#%d [%s/%s] topics changed, stopping pump...", round, c1.Name(), c2.Name())
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopped             // await pump cleanup

		case <-this.quit:
			log.Info("#%d awaiting pump cleanup...", round)
			<-pumpStopped

			ever = false

		case <-pumpStopped:
			// pump encounters problems, just retry
		}
	}

	log.Info("total transferred: %s %smsgs",
		gofmt.ByteSize(this.transferBytes),
		gofmt.Comma(this.transferN))

	log.Info("closing pub...")
	pub.Close()
}