func (this *Kguard) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("kguard", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.BoolVar(&this.longFmt, "l", false, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 2
	}

	zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
	kguards, err := zkzone.KguardInfos()
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%s %v", zk.KguardLeaderPath, err.Error()))
		return
	}

	leader := kguards[0]
	this.Ui.Output(fmt.Sprintf("%s(out of %d candidates) up: %s",
		color.Green(leader.Host), leader.Candidates,
		gofmt.PrettySince(leader.Ctime)))

	if this.longFmt {
		this.showKguardVersion(leader.Host)
		this.showStats(leader.Host)
	}

	return
}
func (this *Zookeeper) printLeader(zkzone *zk.ZkZone) {
	// FIXME all zones will only show the 1st zone info because it blocks others
	for {
		this.Ui.Output(color.Blue(zkzone.Name()))
		for zkhost, lines := range zkzone.RunZkFourLetterCommand("mntr") {
			if this.zkHost != "" && !strings.HasPrefix(zkhost, this.zkHost+":") {
				continue
			}

			parts := strings.Split(lines, "\n")
			for _, l := range parts {
				if strings.HasPrefix(l, "zk_server_state") && strings.HasSuffix(l, "leader") {
					this.Ui.Output(color.Green("%28s", zkhost))
					break
				}
			}
		}

		if this.watchMode {
			time.Sleep(time.Second * 5)
		} else {
			break
		}
	}
}
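// A ZooKeeper four-letter command such as "mntr" or "stat" is just a short TCP
// exchange: write the word, read until the server closes the connection. The
// sketch below illustrates the idea only; it is NOT the implementation behind
// zkzone.RunZkFourLetterCommand, and it assumes the net, time and io/ioutil
// packages are imported.
func fourLetterWord(hostPort, cmd string, timeout time.Duration) (string, error) {
	conn, err := net.DialTimeout("tcp", hostPort, timeout)
	if err != nil {
		return "", err
	}
	defer conn.Close()

	conn.SetDeadline(time.Now().Add(timeout))
	if _, err = conn.Write([]byte(cmd)); err != nil {
		return "", err
	}

	out, err := ioutil.ReadAll(conn) // server closes the connection when done
	return string(out), err
}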
func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		registeredBrokers := zkcluster.RegisteredInfo().Roster
		for _, broker := range registeredBrokers {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
				continue
			}

			_, err = kfk.Topics() // kafka doesn't provide a ping API, so use Topics() as the ping
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else {
				if !this.problematicMode {
					log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
				}
			}

			kfk.Close()
		}
	})
}
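// A standalone sketch of the liveness probe used in Ping.diagnose above: dial a
// single broker and treat a successful Topics() metadata call as the "pong".
// pingBroker is a hypothetical helper (not part of this repo) and assumes the
// sarama package imported by this file.
func pingBroker(addr string, timeout time.Duration) error {
	cf := sarama.NewConfig()
	cf.Net.DialTimeout = timeout
	cf.Net.ReadTimeout = timeout
	cf.Net.WriteTimeout = timeout

	kfk, err := sarama.NewClient([]string{addr}, cf)
	if err != nil {
		return err // broker unreachable
	}
	defer kfk.Close()

	_, err = kfk.Topics() // the metadata round trip doubles as the ping
	return err
}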
func (this *Zktop) displayZoneTop(zkzone *zk.ZkZone) {
	if this.batchMode {
		this.Ui.Output(fmt.Sprintf("%s %s", zkzone.Name(), bjtime.NowBj()))
	} else {
		this.Ui.Output(color.Green(zkzone.Name()))
	}

	header := "VER SERVER PORT M OUTST RECVD SENT CONNS ZNODES LAT(MIN/AVG/MAX)"
	this.Ui.Output(header)

	stats := zkzone.RunZkFourLetterCommand("stat")
	sortedHosts := make([]string, 0, len(stats))
	for hp := range stats {
		sortedHosts = append(sortedHosts, hp)
	}
	sort.Strings(sortedHosts)

	for _, hostPort := range sortedHosts {
		host, port, err := net.SplitHostPort(hostPort)
		if err != nil {
			panic(err)
		}

		stat := zk.ParseStatResult(stats[hostPort])
		if stat.Mode == "" {
			if this.batchMode {
				stat.Mode = "E"
			} else {
				stat.Mode = color.Red("E")
			}
		} else if stat.Mode == "L" && !this.batchMode {
			stat.Mode = color.Blue(stat.Mode)
		}

		var sentQps, recvQps int
		if lastRecv, present := this.lastRecvs[hostPort]; present {
			r1, _ := strconv.Atoi(stat.Received)
			r0, _ := strconv.Atoi(lastRecv)
			recvQps = (r1 - r0) / int(this.refreshInterval.Seconds())

			s1, _ := strconv.Atoi(stat.Sent)
			s0, _ := strconv.Atoi(this.lastSents[hostPort])
			sentQps = (s1 - s0) / int(this.refreshInterval.Seconds())
		}

		this.Ui.Output(fmt.Sprintf("%-15s %-15s %5s %1s %6s %16s %16s %5s %7s %s",
			stat.Version,     // 15
			host,             // 15
			port,             // 5
			stat.Mode,        // 1
			stat.Outstanding, // 6
			fmt.Sprintf("%s/%d", stat.Received, recvQps), // 16
			fmt.Sprintf("%s/%d", stat.Sent, sentQps),     // 16
			stat.Connections, // 5
			stat.Znodes,      // 7
			stat.Latency,
		))

		this.lastRecvs[hostPort] = stat.Received
		this.lastSents[hostPort] = stat.Sent
	}
}
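// The QPS columns above come from deltas of ZooKeeper's cumulative "stat"
// counters between two refreshes. A minimal sketch of that calculation,
// assuming the same string-typed counters and refresh interval; counterQps is a
// hypothetical helper, not part of this repo.
func counterQps(prev, cur string, interval time.Duration) int {
	p, _ := strconv.Atoi(prev)
	c, _ := strconv.Atoi(cur)
	secs := int(interval.Seconds())
	if secs <= 0 {
		return 0
	}
	return (c - p) / secs
}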
func (this *Brokers) clusterBrokers(zone, cluster string, brokers map[string]*zk.BrokerZnode) []string {
	if !patternMatched(cluster, this.cluster) {
		return nil
	}

	if brokers == nil || len(brokers) == 0 {
		return []string{fmt.Sprintf("%s|%s|%s|%s|%s",
			zone, cluster, " ", color.Red("empty brokers"), " ")}
	}

	lines := make([]string, 0, len(brokers))
	if this.staleOnly {
		// try each broker's aliveness
		for brokerId, broker := range brokers {
			cf := sarama.NewConfig()
			cf.Net.ReadTimeout = time.Second * 4
			cf.Net.WriteTimeout = time.Second * 4
			kfk, err := sarama.NewClient([]string{broker.Addr()}, cf)
			if err != nil {
				lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
					zone, cluster, brokerId, broker.Addr(),
					fmt.Sprintf("%s: %v", gofmt.PrettySince(broker.Uptime()), err)))
			} else {
				kfk.Close()
			}
		}

		return lines
	}

	// sort by broker id
	sortedBrokerIds := make([]string, 0, len(brokers))
	for brokerId := range brokers {
		sortedBrokerIds = append(sortedBrokerIds, brokerId)
	}
	sort.Strings(sortedBrokerIds)

	for _, brokerId := range sortedBrokerIds {
		b := brokers[brokerId]

		// colorize uptime of brokers started within the last week
		uptime := gofmt.PrettySince(b.Uptime())
		if time.Since(b.Uptime()) < time.Hour*24*7 {
			uptime = color.Green(uptime)
		}

		if this.ipInNumber {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
				zone, cluster, brokerId, b.Addr(), uptime))
		} else {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s",
				zone, cluster, brokerId, b.NamedAddr(), uptime))
		}
	}

	return lines
}
func (this *TopBroker) drawDashboard() {
	termui.Init()
	width := termui.TermWidth()
	height := termui.TermHeight()
	termui.Close()
	maxWidth := width - 23

	var totalMaxQps, totalMaxBrokerQps float64
	for {
		time.Sleep(this.interval)

		this.startAll()
		this.collectAll()
		datas, maxQps, totalQps := this.showAndResetCounters()
		if maxQps < 1 {
			// draw empty lines
			for _, data := range datas {
				this.Ui.Output(fmt.Sprintf("%20s", data.host))
			}

			continue
		}

		if maxQps > totalMaxBrokerQps {
			totalMaxBrokerQps = maxQps
		}
		if totalQps > totalMaxQps {
			totalMaxQps = totalQps
		}

		refreshScreen()

		for idx, data := range datas {
			if idx >= height-2 {
				break
			}

			if data.qps < 0 {
				panic("negative qps")
			}

			this.renderQpsRow(data.host, data.qps, maxQps, maxWidth)
		}

		this.Ui.Output(fmt.Sprintf("%20s brokers:%d total:%s cum max[broker:%.1f total:%.1f]",
			"-SUMMARY-", len(datas), color.Green("%.1f", totalQps),
			totalMaxBrokerQps, totalMaxQps))
	}
}
func NewPubStore(poolCapcity int, idleTimeout time.Duration, compress bool, debug bool, dryRun bool) *pubStore {
	if debug {
		sarama.Logger = l.New(os.Stdout, color.Green("[Sarama]"), l.LstdFlags|l.Lshortfile)
	}

	return &pubStore{
		hostname:        ctx.Hostname(),
		compress:        compress,
		idleTimeout:     idleTimeout,
		pubPoolsCapcity: poolCapcity,
		pubPools:        make(map[string]*pubPool),
		dryRun:          dryRun,
		shutdownCh:      make(chan struct{}),
	}
}
func (this *TopBroker) renderQpsRow(host string, qps, maxQps float64, maxWidth int) {
	w := int(qps*100/maxQps) * maxWidth / 100
	qpsStr := fmt.Sprintf("%.1f", qps)

	bar := ""
	barColorLen := 0
	for i := 0; i < w-len(qpsStr); i++ {
		bar += color.Green("|")
		barColorLen += 9 // color.Green will add extra 9 chars
	}
	for i := len(bar) - barColorLen; i < maxWidth-len(qpsStr); i++ {
		bar += " "
	}
	bar += qpsStr

	this.Ui.Output(fmt.Sprintf("%20s [%s]", host, bar))
}
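// The barColorLen bookkeeping above is needed because len() also counts the
// invisible ANSI escape bytes that color.Green wraps around each "|" glyph.
// A rough sketch of measuring the visible width instead, assuming conventional
// "\x1b[...m" escape sequences and the standard regexp package; visibleLen is
// illustrative only, not part of this repo.
func visibleLen(colored string) int {
	ansi := regexp.MustCompile(`\x1b\[[0-9;]*m`) // strip color escape sequences
	return len(ansi.ReplaceAllString(colored, ""))
}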
func (this *LsZk) printCluster(zkcluster *zk.ZkCluster) {
	this.Ui.Output(color.Green(zkcluster.Name()))

	children, err := zkcluster.ListChildren(this.recursive)
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%s%s", strings.Repeat(" ", 4), err))
		return
	}

	for _, c := range children {
		this.Ui.Output(fmt.Sprintf("%s%s", strings.Repeat(" ", 4), c))
		if strings.HasSuffix(c, "brokers") {
			this.Ui.Output(fmt.Sprintf("%s%s/ids", strings.Repeat(" ", 4), c))
			this.Ui.Output(fmt.Sprintf("%s%s/topics", strings.Repeat(" ", 4), c))
		}
	}
}
func (this *Clusters) verifyBrokers(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())
	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		zkcluster := zkzone.NewCluster(cluster)
		registeredBrokers := zkcluster.RegisteredInfo().Roster

		// find diff between registeredBrokers and liveBrokers
		// loop1: find liveBrokers > registeredBrokers
		for _, broker := range liveBrokers {
			foundInRoster := false
			for _, b := range registeredBrokers {
				bid := strconv.Itoa(b.Id)
				if bid == broker.Id && broker.Addr() == b.Addr() {
					foundInRoster = true
					break
				}
			}

			if !foundInRoster {
				// should manually register the broker
				this.Ui.Output(strings.Repeat(" ", 4) +
					color.Green("+ gk clusters -z %s -s -c %s -addbroker %s:%s",
						zkzone.Name(), cluster, broker.Id, broker.Addr()))
			}
		}

		// loop2: find liveBrokers < registeredBrokers
		for _, b := range registeredBrokers {
			foundInLive := false
			for _, broker := range liveBrokers {
				bid := strconv.Itoa(b.Id)
				if bid == broker.Id && broker.Addr() == b.Addr() {
					foundInLive = true
					break
				}
			}

			if !foundInLive {
				// the broker is dead
				this.Ui.Output(strings.Repeat(" ", 4) +
					color.Red("cluster[%s] broker[%d] %s is dead",
						cluster, b.Id, b.Addr()))
			}
		}
	})
}
func (this *Zookeeper) printZkStats(zkzone *zk.ZkZone) {
	for {
		this.Ui.Output(color.Blue(zkzone.Name()))
		for zkhost, lines := range zkzone.RunZkFourLetterCommand(this.flw) {
			if this.zkHost != "" && !strings.HasPrefix(zkhost, this.zkHost+":") {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%s\n%s", color.Green("%28s", zkhost), lines))
		}

		if this.watchMode {
			time.Sleep(time.Second * 5)
		} else {
			break
		}
	}
}
func (this *Get) showChildrenRecursively(conn *zk.Conn, path string) {
	children, _, err := conn.Children(path)
	if err != nil {
		return
	}

	sort.Strings(children)
	for _, child := range children {
		if path == "/" {
			path = ""
		}

		znode := fmt.Sprintf("%s/%s", path, child)

		// display znode content
		data, stat, err := conn.Get(znode)
		must(err)
		if stat.EphemeralOwner > 0 {
			if patternMatched(znode, this.likePattern) {
				this.Ui.Output(color.Yellow(znode))
			}
		} else {
			if patternMatched(znode, this.likePattern) {
				this.Ui.Output(color.Green(znode))
			}
		}

		if len(data) > 0 && patternMatched(znode, this.likePattern) {
			if this.verbose {
				this.Ui.Output(fmt.Sprintf("%s %#v", strings.Repeat(" ", 3), stat))
				this.Ui.Output(fmt.Sprintf("%s %v", strings.Repeat(" ", 3), data))
			}
			this.Ui.Output(fmt.Sprintf("%s %s", strings.Repeat(" ", 3), string(data)))
		}

		this.showChildrenRecursively(conn, znode)
	}
}
func (this *Discover) discoverClusters(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())

	existingClusters := zkzone.Clusters()
	existingClusterPaths := make(map[string]struct{}, len(existingClusters))
	for _, path := range existingClusters {
		existingClusterPaths[path] = struct{}{}
	}

	discoveredClusters, err := zkzone.DiscoverClusters("/")
	if err != nil {
		this.Ui.Error(zkzone.Name() + ": " + err.Error())
		return
	}

	// print each cluster state: new, normal
	for _, zkpath := range discoveredClusters {
		if _, present := existingClusterPaths[zkpath]; !present {
			this.Ui.Output(strings.Repeat(" ", 4) + color.Green("%s +++", zkpath))
		} else {
			this.Ui.Output(strings.Repeat(" ", 4) + zkpath)
		}
	}

	// find the offline clusters
	for c, path := range existingClusters {
		path = strings.TrimSpace(path)
		foundOnline := false
		for _, p := range discoveredClusters {
			p = strings.TrimSpace(p)
			if p == path {
				foundOnline = true
				break
			}
		}

		if !foundOnline {
			this.Ui.Output(strings.Repeat(" ", 4) + color.Red("%s: %s ---", c, path))
		}
	}
}
func (this *Consumers) printConsumersByHost(zkzone *zk.ZkZone, clusterPattern string) {
	outputs := make(map[string]map[string]map[string]int) // host: {cluster: {topic: count}}

	this.Ui.Output(color.Blue(zkzone.Name()))

	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		consumerGroups := zkcluster.ConsumerGroups()
		for _, group := range consumerGroups {
			for _, c := range group {
				if _, present := outputs[c.Host()]; !present {
					outputs[c.Host()] = make(map[string]map[string]int)
				}
				if _, present := outputs[c.Host()][zkcluster.Name()]; !present {
					outputs[c.Host()][zkcluster.Name()] = make(map[string]int)
				}

				for topic, count := range c.Subscription {
					outputs[c.Host()][zkcluster.Name()][topic] += count
				}
			}
		}
	})

	sortedHosts := make([]string, 0, len(outputs))
	for host := range outputs {
		sortedHosts = append(sortedHosts, host)
	}
	sort.Strings(sortedHosts)

	for _, host := range sortedHosts {
		tc := outputs[host]
		this.Ui.Output(fmt.Sprintf("%s %+v", color.Green("%22s", host), tc))
	}
}
func (this *Verify) verifyPub() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Kafka", "Stock", "PubSub", "Stock", "Diff", "?"})
	for _, t := range this.topics {
		if t.KafkaTopicName == "" {
			continue
		}

		kafkaCluster := this.kafkaTopics[t.KafkaTopicName]
		if kafkaCluster == "" {
			this.Ui.Warn(fmt.Sprintf("invalid kafka topic: %s", t.KafkaTopicName))
			continue
		}

		psubTopic := manager.Default.KafkaTopic(t.AppId, t.TopicName, "v1")
		offsets := this.pubOffsetDiff(t.KafkaTopicName, kafkaCluster, psubTopic, this.cluster)

		var diff string
		if offsets[0] == 0 && offsets[1] != 0 {
			diff = color.Yellow("%d", offsets[1]-offsets[0])
		} else if math.Abs(float64(offsets[0]-offsets[1])) < 20 {
			diff = color.Green("%d", offsets[1]-offsets[0])
		} else {
			diff = color.Red("%d", offsets[1]-offsets[0])
		}

		problem := "N"
		if _, present := this.problemeticTopics[t.KafkaTopicName]; present {
			problem = color.Yellow("Y")
		}

		table.Append([]string{
			t.KafkaTopicName, fmt.Sprintf("%d", offsets[0]),
			t.TopicName, fmt.Sprintf("%d", offsets[1]),
			diff, problem})
	}
	table.Render()
}
func (c *Cluster) debug(format string, v ...interface{}) {
	if c.logLevel <= LogLevelDebug {
		pc, file, line, ok := runtime.Caller(1)
		if !ok {
			file = "<?>"
			line = 0
		} else {
			if i := strings.LastIndex(file, "/"); i >= 0 {
				file = file[i+1:]
			}
		}
		fn := runtime.FuncForPC(pc).Name()
		fnparts := strings.Split(fn, "/")

		t := time.Now()
		hour, min, sec := t.Clock()
		nanosec := t.Nanosecond() / 1e3

		debugLock.Lock()
		nodePrefix := c.self.ID.String()
		switch c.color {
		case "red":
			nodePrefix = color.Red(c.self.ID.String())
		case "blue":
			nodePrefix = color.Blue(c.self.ID.String())
		case "yellow":
			nodePrefix = color.Yellow(c.self.ID.String())
		case "green":
			nodePrefix = color.Green(c.self.ID.String())
		}
		fmt.Printf(nodePrefix+" [%d:%d:%d.%04d] %s:%d(%s): %s\n",
			hour, min, sec, nanosec,
			file, line, color.Red(fnparts[len(fnparts)-1]),
			fmt.Sprintf(format, v...))
		debugLock.Unlock()
	}
}
func (this *Host) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("host", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", "", "")
	cmdFlags.StringVar(&this.host, "ip", "", "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if validateArgs(this, this.Ui).
		require("-z", "-h").
		invalid(args) {
		return 2
	}

	for {
		this.diagnose()
		this.Ui.Output(color.Green("%s", strings.Repeat("=", 40)))
		time.Sleep(time.Second * 5)
	}

	return
}
func (this *Clusters) printClusters(zkzone *zk.ZkZone, clusterPattern string, port string) {
	if this.registeredBrokers {
		this.printRegisteredBrokers(zkzone)
		return
	}

	type clusterInfo struct {
		name, path         string
		nickname           string
		topicN, partitionN int
		err                string
		priority           int
		public             bool
		retention          int
		replicas           int
		brokerInfos        []zk.BrokerInfo
	}
	clusters := make([]clusterInfo, 0)
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		ci := clusterInfo{
			name: zkcluster.Name(),
			path: zkcluster.Chroot(),
		}
		if this.neat {
			clusters = append(clusters, ci)
			return
		}

		// verbose mode, will calculate topics and partition count
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			ci.err = "no live brokers"
			clusters = append(clusters, ci)
			return
		}

		if port != "" {
			for _, hostport := range brokerList {
				_, p, err := net.SplitHostPort(hostport)
				swallow(err)

				if p != port {
					return
				}
			}
		}

		info := zkcluster.RegisteredInfo()
		if this.publicOnly && !info.Public {
			return
		}

		if !this.verbose {
			ci.brokerInfos = info.Roster
			clusters = append(clusters, ci)
			return
		}

		kfk, err := sarama.NewClient(brokerList, saramaConfig())
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}
		topics, err := kfk.Topics()
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}
		partitionN := 0
		for _, topic := range topics {
			partitions, err := kfk.Partitions(topic)
			if err != nil {
				ci.err = err.Error()
				clusters = append(clusters, ci)
				continue
			}

			partitionN += len(partitions)
		}

		clusters = append(clusters, clusterInfo{
			name:        zkcluster.Name(),
			nickname:    info.Nickname,
			path:        zkcluster.Chroot(),
			topicN:      len(topics),
			partitionN:  partitionN,
			retention:   info.Retention,
			public:      info.Public,
			replicas:    info.Replicas,
			priority:    info.Priority,
			brokerInfos: info.Roster,
		})
	})

	this.Ui.Output(fmt.Sprintf("%s: %d", zkzone.Name(), len(clusters)))
	if this.verbose {
		// 2 loops: 1. print the err clusters 2. print the good clusters
		for _, c := range clusters {
			if c.err == "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s %s", c.name, c.path, color.Red(c.err)))
		}

		// loop2
		for _, c := range clusters {
			if c.err != "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))

			brokers := []string{}
			for _, broker := range c.brokerInfos {
				if this.ipInNumber {
					brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
				} else {
					brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
				}
			}
			if len(brokers) > 0 {
				sort.Strings(brokers)
				this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
			}

			this.Ui.Output(strings.Repeat(" ", 4) +
				color.Green("nick:%s public:%v topics:%d partitions:%d replicas:%d retention:%dh",
					c.nickname, c.public,
					c.topicN, c.partitionN, c.replicas, c.retention))
		}

		return
	}

	// not verbose mode
	hostsWithoutDnsRecords := make([]string, 0)
	for _, c := range clusters {
		this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))

		brokers := []string{}
		for _, broker := range c.brokerInfos {
			if this.ipInNumber {
				brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
			} else {
				brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
			}

			if broker.Addr() == broker.NamedAddr() {
				hostsWithoutDnsRecords = append(hostsWithoutDnsRecords, fmt.Sprintf("%s:%s", c.name, broker.Addr()))
			}
		}

		if len(brokers) > 0 {
			sort.Strings(brokers)
			this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
		} else {
			this.Ui.Warn(fmt.Sprintf("%31s no live registered brokers", " "))
		}
	}

	if len(hostsWithoutDnsRecords) > 0 {
		this.Ui.Warn("brokers without dns record:")
		for _, broker := range hostsWithoutDnsRecords {
			parts := strings.SplitN(broker, ":", 2)
			this.Ui.Output(fmt.Sprintf("%30s: %s", parts[0], color.Yellow(parts[1])))
		}
	}
}
func (*Sample) consumeSample() string {
	return fmt.Sprintf(`
public class KafkaConsumer {

    private final ConsumerConnector consumer;

    private KafkaConsumer() {
        Properties props = new Properties();
        props.put("%s", "zk2181a.wdds.zk.com:2181,zk2181b.wdds.zk.com:2181,zk2181c.wdds.zk.com:2181/kafka");
        props.put("%s", "group1");
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "60000"); // 1m
        //props.put("auto.offset.reset", "smallest"); // largest | smallest
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        ConsumerConfig config = new ConsumerConfig(props);
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
    }

    public void shutdown() {
        if (consumer != null) {
            consumer.shutdown();
        }
    }

    void consume(String topic, int %s) {
        // %s
        // %s
        // %s
        Runtime.getRuntime().addShutdownHook(new Thread() {
            public void run() {
                consumer.shutdown();
            }
        });

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, %s);
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get(topic).get(0);
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            // consumer.commitOffsets(); // manually commit offsets
            System.out.println(it.next().message());
        }
    }

    public static void main(String[] args) {
        new KafkaConsumer().consume();
    }
}
`,
		color.Cyan("zookeeper.connect"),
		color.Cyan("group.id"),
		color.Green("threads"),
		color.Red("VERY important!"),
		color.Red("graceful shutdown the consumer group to commit consumed offset"),
		color.Red("avoid consuming duplicated message when restarting the same consumer group"),
		color.Green("threads"))
}
func (this *Console) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("console", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	this.builtinCmds = []string{"help", "history", "ls", "cat", "pwd", "cd"}
	this.cwd = "/"

	this.zkzone = gzk.NewZkZone(gzk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
	if err := this.zkzone.Connect(); err != nil {
		panic(err)
	}
	defer this.zkzone.Close()

	this.Line = liner.NewLiner()
	this.Line.SetCtrlCAborts(true)
	this.Line.SetCompleter(func(line string) (c []string) {
		p := strings.SplitN(line, " ", 2)
		if len(p) == 2 && strings.TrimSpace(p[1]) != "" {
			children, _, err := this.zkzone.Conn().Children(this.cwd)
			if err != nil {
				this.Ui.Error(err.Error())
				return
			}

			for _, child := range children {
				if strings.HasPrefix(child, p[1]) {
					c = append(c, fmt.Sprintf("%s %s", p[0], child))
				}
			}

			return
		}

		for cmd := range this.Cmds {
			if strings.HasPrefix(cmd, strings.ToLower(line)) {
				c = append(c, cmd)
			}
		}
		for _, cmd := range this.builtinCmds {
			if strings.HasPrefix(cmd, strings.ToLower(line)) {
				c = append(c, cmd)
			}
		}
		c = append(c, this.builtinCmds...)
		return
	})
	defer this.Line.Close()

	if usr, err := user.Current(); err == nil {
		this.historyFile = filepath.Join(usr.HomeDir, fmt.Sprintf(".%s_history", this.Cmd))
		if f, e := os.Open(this.historyFile); e == nil {
			this.Line.ReadHistory(f)
			f.Close()
		}
	}

	for {
		this.refreshPrompt()
		line, err := this.Line.Prompt(color.Green("%s> ", this.prompt))
		if err != nil {
			break
		}

		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		if line == "bye" || line == "q" || line == "quit" || line == "exit" {
			break
		}

		this.runCommand(line)

		// write out the history
		if len(this.historyFile) > 0 {
			this.Line.AppendHistory(line)
			if f, e := os.Create(this.historyFile); e == nil {
				this.Line.WriteHistory(f)
				f.Close()
			}
		}
	}

	return
}
func (this *Topics) displayTopicsOfCluster(zkcluster *zk.ZkCluster) {
	echoBuffer := func(lines []string) {
		for _, l := range lines {
			this.Ui.Output(l)
		}
	}

	linesInTopicMode := make([]string, 0)
	if this.verbose {
		linesInTopicMode = this.echoOrBuffer(zkcluster.Name(), linesInTopicMode)
	}

	// get all alive brokers within this cluster
	brokers := zkcluster.Brokers()
	if len(brokers) == 0 {
		linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s", " ",
			color.Red("%s empty brokers", zkcluster.Name())), linesInTopicMode)
		echoBuffer(linesInTopicMode)
		return
	}

	if this.verbose {
		sortedBrokerIds := make([]string, 0, len(brokers))
		for brokerId := range brokers {
			sortedBrokerIds = append(sortedBrokerIds, brokerId)
		}
		sort.Strings(sortedBrokerIds)
		for _, brokerId := range sortedBrokerIds {
			if this.ipInNumber {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId]), linesInTopicMode)
			} else {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId].NamedString()), linesInTopicMode)
			}
		}
	}

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(color.Yellow("%5s%+v %s", " ",
				zkcluster.BrokerList(), err.Error()), linesInTopicMode)
		}

		return
	}
	defer kfk.Close()

	topics, err := kfk.Topics()
	swallow(err)
	if len(topics) == 0 {
		if this.topicPattern == "" && this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%5s%s", " ",
				color.Magenta("no topics")), linesInTopicMode)
			echoBuffer(linesInTopicMode)
		}

		return
	}

	sortedTopics := make([]string, 0, len(topics))
	for _, t := range topics {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	topicsCtime := zkcluster.TopicsCtime()
	hasTopicMatched := false
	for _, topic := range sortedTopics {
		if !patternMatched(topic, this.topicPattern) {
			continue
		}
		if this.since > 0 && time.Since(topicsCtime[topic]) > this.since {
			continue
		}

		this.topicN++
		hasTopicMatched = true
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(strings.Repeat(" ", 4)+color.Cyan(topic), linesInTopicMode)
		}

		// get partitions and check if some dead
		alivePartitions, err := kfk.WritablePartitions(topic)
		swallow(err)
		partions, err := kfk.Partitions(topic)
		swallow(err)
		if len(alivePartitions) != len(partions) {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %s P: %s/%+v",
				zkcluster.Name(), color.Cyan("%-50s", topic), color.Red("partial dead"),
				color.Green("%+v", alivePartitions), partions), linesInTopicMode)
		}

		replicas, err := kfk.Replicas(topic, partions[0])
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partions[0], err))
		}

		this.partitionN += len(partions)
		if !this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %3dP %dR %s",
				zkcluster.Name(),
				color.Cyan("%-50s", topic),
				len(partions), len(replicas),
				gofmt.PrettySince(topicsCtime[topic])), linesInTopicMode)
			continue
		}

		for _, partitionID := range alivePartitions {
			leader, err := kfk.Leader(topic, partitionID)
			swallow(err)

			replicas, err := kfk.Replicas(topic, partitionID)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitionID, err))
			}

			isr, isrMtime, partitionCtime := zkcluster.Isr(topic, partitionID)
			isrMtimeSince := gofmt.PrettySince(isrMtime)
			if time.Since(isrMtime).Hours() < 24 {
				// ever out of sync last 24h
				isrMtimeSince = color.Magenta(isrMtimeSince)
			}

			underReplicated := false
			if len(isr) != len(replicas) {
				underReplicated = true
			}

			latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
			swallow(err)
			oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
			swallow(err)

			if this.count > 0 && (latestOffset-oldestOffset) < this.count {
				continue
			}

			this.totalMsgs += latestOffset - oldestOffset
			this.totalOffsets += latestOffset

			if !underReplicated {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%+v Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, isr,
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset),
					gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			} else {
				// use red for alert
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%s Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, color.Red("%+v", isr),
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset),
					gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			}
		}
	}

	if this.topicPattern != "" {
		if hasTopicMatched {
			echoBuffer(linesInTopicMode)
		}
	} else {
		echoBuffer(linesInTopicMode)
	}
}
func (this *Lags) printConsumersLag(zkcluster *zk.ZkCluster) {
	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		lines := make([]string, 0, 100)

		sortedTopicAndPartitionIds := make([]string, 0)
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}

			var (
				lagOutput string
				symbol    string
			)
			if consumer.Lag > int64(this.lagThreshold) {
				lagOutput = color.Red("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Yellow("⚠︎︎")
				} else {
					symbol = color.Yellow("◎")
				}
			} else {
				lagOutput = color.Blue("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Green("◉")
				} else {
					symbol = color.Yellow("◎")
				}
			}

			if consumer.Online {
				if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
					continue
				}

				var (
					host   string
					uptime string
				)
				if consumer.ConsumerZnode == nil {
					host = "unrecognized"
					uptime = "-"
				} else {
					host = color.Green("%s", consumer.ConsumerZnode.Host())
					if time.Since(consumer.ConsumerZnode.Uptime()) < time.Hour {
						uptime = color.Magenta(gofmt.PrettySince(consumer.ConsumerZnode.Uptime()))
					} else {
						uptime = gofmt.PrettySince(consumer.ConsumerZnode.Uptime())
					}
				}

				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-15s %s %-10s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time()),
					host, uptime))
			} else if !this.onlineOnly {
				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-12s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time())))
			}
		}

		if len(lines) > 0 {
			this.Ui.Output(strings.Repeat(" ", 4) + group)
			for _, l := range lines {
				this.Ui.Output(l)
			}
		}
	}
}
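// For reference, the consumer lag displayed above is simply the produced
// high-water mark minus the committed consumer offset, flagged once it exceeds
// a configured threshold. lagStatus is a hypothetical helper for illustration
// only, not part of this repo.
func lagStatus(producerOffset, consumerOffset, threshold int64) (lag int64, problematic bool) {
	lag = producerOffset - consumerOffset
	return lag, lag > threshold
}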
func sub(id int) {
	cf := api.DefaultConfig("app2", "mysecret")
	cf.Debug = true
	cf.Sub.Endpoint = addr
	c := api.NewClient(cf)
	i := 0
	t0 := time.Now()
	var err error
	opt := api.SubOption{
		AppId: appid,
		Topic: topic,
		Ver:   "v1",
		Group: group,
		Tag:   tag,
	}
	if mode == "subx" {
		err = c.SubX(opt, func(statusCode int, msg []byte, r *api.SubXResult) error {
			i++
			if n > 0 && i >= n {
				return api.ErrSubStop
			}

			if i%step == 0 {
				log.Println(statusCode, string(msg))
			}

			if sleep > 0 {
				time.Sleep(sleep)
			}

			// handle the msg here
			if rand.Int()%2 == 0 {
				// simulate handling this msg successfully
				log.Println(color.Green("ok"))
			} else {
				// this msg was not successfully handled
				if rand.Int()%2 == 0 {
					// after retrying several times, give up
					r.Bury = api.ShadowRetry
					log.Println(color.Red("shadow"))
				} else {
					// simulate handling the msg successfully after retry
					if sleep > 0 {
						time.Sleep(sleep)
					}
					log.Println(color.Yellow("retried"))
				}
			}

			log.Println()
			return nil
		})
	} else {
		err = c.Sub(opt, func(statusCode int, msg []byte) error {
			i++
			if n > 0 && i >= n {
				return api.ErrSubStop
			}

			if i%step == 0 {
				log.Println(id, statusCode, string(msg))
			}

			if sleep > 0 {
				time.Sleep(sleep)
			}

			return nil
		})
	}

	if err != nil {
		log.Println(err)
	}

	elapsed := time.Since(t0)
	log.Printf("%d msgs in %s, tps: %.2f\n", n, elapsed, float64(n)/elapsed.Seconds())
}
func (this *Get) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("get", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.BoolVar(&this.verbose, "l", false, "")
	cmdFlags.BoolVar(&this.recursive, "R", false, "")
	cmdFlags.StringVar(&this.likePattern, "like", "", "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.zone == "" {
		this.Ui.Error("unknown zone")
		return 2
	}

	if len(args) == 0 {
		this.Ui.Error("missing path")
		return 2
	}

	this.path = args[len(args)-1]

	zkzone := gzk.NewZkZone(gzk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
	defer zkzone.Close()

	if this.recursive {
		data, stat, err := zkzone.Conn().Get(this.path)
		must(err)
		if stat.EphemeralOwner > 0 {
			this.Ui.Output(color.Yellow(this.path))
		} else {
			this.Ui.Output(color.Green(this.path))
		}

		if len(data) > 0 {
			if this.verbose {
				this.Ui.Output(fmt.Sprintf("%s %#v", strings.Repeat(" ", 3), stat))
				this.Ui.Output(fmt.Sprintf("%s %v", strings.Repeat(" ", 3), data))
			}
			this.Ui.Output(fmt.Sprintf("%s %s", strings.Repeat(" ", 3), string(data)))
		}

		this.showChildrenRecursively(zkzone.Conn(), this.path)
		return 0
	}

	conn := zkzone.Conn()
	data, stat, err := conn.Get(this.path)
	must(err)
	if len(data) == 0 {
		this.Ui.Output("empty znode")
		return
	}

	if this.verbose {
		this.Ui.Output(color.Magenta("%#v", *stat))
	}

	this.Ui.Output(color.Green("Data Bytes"))
	fmt.Println(data)
	this.Ui.Output(color.Green("Data as String"))
	this.Ui.Output(string(data))

	return
}
func (this *Kateway) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("kateway", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", "", "")
	cmdFlags.BoolVar(&this.configMode, "cf", false, "")
	cmdFlags.StringVar(&this.id, "id", "", "")
	cmdFlags.BoolVar(&this.install, "i", false, "")
	cmdFlags.BoolVar(&this.longFmt, "l", false, "")
	cmdFlags.StringVar(&this.configOption, "option", "", "")
	cmdFlags.BoolVar(&this.versionOnly, "ver", false, "")
	cmdFlags.BoolVar(&this.flameGraph, "flame", false, "")
	cmdFlags.StringVar(&this.logLevel, "loglevel", "", "")
	cmdFlags.StringVar(&this.visualLog, "visualog", "", "")
	cmdFlags.BoolVar(&this.showZkNodes, "zk", false, "")
	cmdFlags.BoolVar(&this.checkup, "checkup", false, "")
	cmdFlags.BoolVar(&this.benchmark, "bench", false, "")
	cmdFlags.StringVar(&this.benchmarkMaster, "master", "", "")
	cmdFlags.BoolVar(&this.pub, "pub", false, "")
	cmdFlags.BoolVar(&this.sub, "sub", false, "")
	cmdFlags.BoolVar(&this.benchmarkAsync, "async", false, "")
	cmdFlags.BoolVar(&this.curl, "curl", false, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 2
	}

	if this.benchmark {
		if validateArgs(this, this.Ui).
			require("-z").
			requireAdminRights("-z").
			invalid(args) {
			return 2
		}

		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		zone := ctx.Zone(zkzone.Name())
		this.benchApp = zone.SmokeApp
		this.benchSecret = zone.SmokeSecret
		this.benchTopic = zone.SmokeTopic
		this.benchVer = zone.SmokeTopicVersion
		this.benchPubEndpoint = zone.PubEndpoint
		if this.id != "" {
			kws, err := zkzone.KatewayInfos()
			swallow(err)
			for _, kw := range kws {
				if kw.Id == this.id {
					this.benchPubEndpoint = kw.PubAddr
					break
				}
			}
		}
		this.benchId = fmt.Sprintf("%s-%s", ctx.Hostname(), strings.Replace(uuid.New(), "-", "", -1))
		this.runBenchmark(zkzone)
		return
	}

	if this.flameGraph {
		if validateArgs(this, this.Ui).
			require("-z", "-id").
			requireAdminRights("-z").
			invalid(args) {
			return 2
		}

		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		this.generateFlameGraph(zkzone)
		return
	}

	if this.visualLog != "" {
		this.doVisualize()
		return
	}

	if this.pub {
		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		this.runPub(zkzone)
		return
	}

	if this.sub {
		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		this.runSub(zkzone)
		return
	}

	if this.install {
		if validateArgs(this, this.Ui).
			require("-z").
			invalid(args) {
			return 2
		}

		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		this.installGuide(zkzone)
		return
	}

	if this.configOption != "" {
		this.configMode = true
	}

	if this.configMode {
		if validateArgs(this, this.Ui).
			require("-z").
			requireAdminRights("-z").
			invalid(args) {
			return 2
		}

		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		if this.logLevel != "" {
			if this.id != "" {
				kw := zkzone.KatewayInfoById(this.id)
				if kw == nil {
					panic(fmt.Sprintf("kateway %s invalid entry found in zk", this.id))
				}

				this.callKateway(kw, "PUT", fmt.Sprintf("v1/log/%s", this.logLevel))
			} else {
				// apply on all kateways
				kws, _ := zkzone.KatewayInfos()
				for _, kw := range kws {
					this.callKateway(kw, "PUT", fmt.Sprintf("v1/log/%s", this.logLevel))
				}
			}
		}

		if this.configOption != "" {
			parts := strings.SplitN(this.configOption, "=", 2)
			if len(parts) != 2 {
				this.Ui.Error("usage: key=value")
				return
			}
			k, v := parts[0], parts[1]
			if this.id != "" {
				kw := zkzone.KatewayInfoById(this.id)
				if kw == nil {
					panic(fmt.Sprintf("kateway %s invalid entry found in zk", this.id))
				}

				this.callKateway(kw, "PUT", fmt.Sprintf("v1/options/%s/%s", k, v))
			} else {
				// apply on all kateways
				kws, _ := zkzone.KatewayInfos()
				for _, kw := range kws {
					this.callKateway(kw, "PUT", fmt.Sprintf("v1/options/%s/%s", k, v))
				}
			}
		}

		return
	}

	if this.checkup {
		if this.zone == "" {
			forAllSortedZones(func(zkzone *zk.ZkZone) {
				this.runCheckup(zkzone)
			})
			return
		}

		zkzone := zk.NewZkZone(zk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
		this.runCheckup(zkzone)
		return
	}

	if this.showZkNodes {
		this.Ui.Output(fmt.Sprintf(`%s pubsub manager db dsn
%s job db cluster config
%s turn off webhook dir`,
			color.Green("%-50s", zk.KatewayMysqlPath),
			color.Green("%-50s", zk.PubsubJobConfig),
			color.Green("%-50s", zk.PubsubWebhooksOff)))
		return
	}

	// display mode
	lines := make([]string, 0)
	header := "Zone|Id|Ip|Pprof|Build|Cpu|Heap|Obj|Go|P/S|hhIn/hhOut|Uptime"
	lines = append(lines, header)
	forSortedZones(func(zkzone *zk.ZkZone) {
		if this.zone != "" && zkzone.Name() != this.zone {
			return
		}

		if !this.versionOnly {
			mysqlDsn, err := zkzone.KatewayMysqlDsn()
			if err != nil {
				this.Ui.Warn(fmt.Sprintf("kateway[%s] mysql DSN not set on zk yet", zkzone.Name()))
				this.Ui.Output(fmt.Sprintf("e.g. %s -> pubsub:pubsub@tcp(10.77.135.217:10010)/pubsub?charset=utf8&timeout=10s",
					zk.KatewayMysqlPath))
			} else {
				this.Ui.Output(fmt.Sprintf("zone[%s] manager db: %s",
					color.Cyan(zkzone.Name()), mysqlDsn))
			}
		}

		kateways, err := zkzone.KatewayInfos()
		if err != nil {
			if err == zklib.ErrNoNode {
				this.Ui.Output("no kateway running")
				return
			} else {
				swallow(err)
			}
		}

		for _, kw := range kateways {
			if this.id != "" && this.id != kw.Id {
				continue
			}

			statusMap, _ := this.getKatewayStatusMap(kw.ManAddr)
			logLevel, _ := statusMap["loglevel"].(string)
			heapSize, _ := statusMap["heap"].(string)
			heapObjs, _ := statusMap["objects"].(string)
			pubConn, _ := statusMap["pubconn"].(string)
			hhAppendN, _ := statusMap["hh_appends"].(string)
			hhDeliverN, _ := statusMap["hh_delivers"].(string)
			subConn, _ := statusMap["subconn"].(string)
			goN, _ := statusMap["goroutines"].(string)

			if this.versionOnly {
				pprofAddr := kw.DebugAddr
				if len(pprofAddr) > 0 && pprofAddr[0] == ':' {
					pprofAddr = kw.Ip + pprofAddr
				}
				pprofAddr = fmt.Sprintf("%s/debug/pprof/", pprofAddr)
				lines = append(lines, fmt.Sprintf("%s|%s|%s|%s|%s/%s|%s|%s|%s|%s|%s/%s|%s/%s|%s",
					zkzone.Name(), kw.Id, kw.Ip, pprofAddr, kw.Build, kw.BuiltAt,
					kw.Cpu, heapSize, heapObjs, goN, pubConn, subConn,
					hhAppendN, hhDeliverN,
					gofmt.PrettySince(kw.Ctime)))
				continue
			}

			this.Ui.Info(fmt.Sprintf("id:%-2s host:%s cpu:%-2s up:%s",
				kw.Id, kw.Host, kw.Cpu, gofmt.PrettySince(kw.Ctime)))
			this.Ui.Output(fmt.Sprintf(" ver: %s\n arch: %s\n build: %s\n built: %s\n log: %s\n pub: %s\n sub: %s\n man: %s\n dbg: %s",
				kw.Ver,
				kw.Arch,
				color.Red(kw.Build),
				kw.BuiltAt,
				logLevel,
				kw.PubAddr,
				kw.SubAddr,
				kw.ManAddr,
				kw.DebugAddr,
			))

			if this.longFmt {
				this.Ui.Output(" full status:")
				this.Ui.Output(this.getKatewayStatus(kw.ManAddr))
			}
		}
	})

	if this.versionOnly && len(lines) > 1 {
		fmt.Println(columnize.SimpleFormat(lines))
	}

	return
}
func (this *Peek) Run(args []string) (exitCode int) {
	var (
		cluster      string
		zone         string
		topicPattern string
		partitionId  int
		wait         time.Duration
		silence      bool
	)
	cmdFlags := flag.NewFlagSet("peek", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.StringVar(&cluster, "c", "", "")
	cmdFlags.StringVar(&topicPattern, "t", "", "")
	cmdFlags.IntVar(&partitionId, "p", 0, "")
	cmdFlags.BoolVar(&this.colorize, "color", true, "")
	cmdFlags.Int64Var(&this.lastN, "last", -1, "")
	cmdFlags.BoolVar(&this.pretty, "pretty", false, "")
	cmdFlags.IntVar(&this.limit, "n", -1, "")
	cmdFlags.StringVar(&this.column, "col", "", "") // TODO support multiple columns
	cmdFlags.BoolVar(&this.beep, "beep", false, "")
	cmdFlags.Int64Var(&this.offset, "offset", sarama.OffsetNewest, "")
	cmdFlags.BoolVar(&silence, "s", false, "")
	cmdFlags.DurationVar(&wait, "d", time.Hour, "")
	cmdFlags.BoolVar(&this.bodyOnly, "body", false, "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.pretty {
		this.bodyOnly = true
	}

	this.quit = make(chan struct{})

	// create stats up front so the select loop below can reference it when -s is set
	stats := newPeekStats()
	if silence {
		go stats.start()
	}

	zkzone := zk.NewZkZone(zk.DefaultConfig(zone, ctx.ZoneZkAddrs(zone)))
	msgChan := make(chan *sarama.ConsumerMessage, 20000) // msg aggregator channel
	if cluster == "" {
		zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
			this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
		})
	} else {
		zkcluster := zkzone.NewCluster(cluster)
		this.consumeCluster(zkcluster, topicPattern, partitionId, msgChan)
	}

	signal.RegisterHandler(func(sig os.Signal) {
		log.Printf("received signal: %s", strings.ToUpper(sig.String()))
		log.Println("quitting...")

		this.once.Do(func() {
			close(this.quit)
		})
	}, syscall.SIGINT, syscall.SIGTERM)

	var (
		startAt = time.Now()
		msg     *sarama.ConsumerMessage
		total   int
		bytesN  int64
	)

	var (
		j          map[string]interface{}
		prettyJSON bytes.Buffer
	)

LOOP:
	for {
		if time.Since(startAt) >= wait {
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return
		}

		select {
		case <-this.quit:
			this.Ui.Output(fmt.Sprintf("Total: %s msgs, %s, elapsed: %s",
				gofmt.Comma(int64(total)), gofmt.ByteSize(bytesN), time.Since(startAt)))
			elapsed := time.Since(startAt).Seconds()
			if elapsed > 1. {
				this.Ui.Output(fmt.Sprintf("Speed: %d/s", total/int(elapsed)))
				if total > 0 {
					this.Ui.Output(fmt.Sprintf("Size : %s/msg", gofmt.ByteSize(bytesN/int64(total))))
				}
			}

			return

		case <-time.After(time.Second):
			continue

		case msg = <-msgChan:
			if silence {
				stats.MsgCountPerSecond.Mark(1)
				stats.MsgBytesPerSecond.Mark(int64(len(msg.Value)))
			} else {
				var outmsg string
				if this.column != "" {
					if err := json.Unmarshal(msg.Value, &j); err != nil {
						this.Ui.Error(err.Error())
					} else {
						var colVal string
						switch t := j[this.column].(type) {
						case string:
							colVal = t
						case float64:
							colVal = fmt.Sprintf("%.0f", t)
						case int:
							colVal = fmt.Sprintf("%d", t)
						}

						if this.bodyOnly {
							if this.pretty {
								if err = json.Indent(&prettyJSON, []byte(colVal), "", " "); err != nil {
									fmt.Println(err.Error())
								} else {
									outmsg = string(prettyJSON.Bytes())
								}
							} else {
								outmsg = colVal
							}
						} else if this.colorize {
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								color.Green(msg.Topic), msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						} else {
							// colored UI will have invisible chars output
							outmsg = fmt.Sprintf("%s/%d %s k:%s v:%s",
								msg.Topic, msg.Partition,
								gofmt.Comma(msg.Offset), string(msg.Key), colVal)
						}
					}
				} else {
					if this.bodyOnly {
						if this.pretty {
							json.Indent(&prettyJSON, msg.Value, "", " ")
							outmsg = string(prettyJSON.Bytes())
						} else {
							outmsg = string(msg.Value)
						}
					} else if this.colorize {
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							color.Green(msg.Topic), msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					} else {
						// colored UI will have invisible chars output
						outmsg = fmt.Sprintf("%s/%d %s k:%s, v:%s",
							msg.Topic, msg.Partition,
							gofmt.Comma(msg.Offset), string(msg.Key), string(msg.Value))
					}
				}

				if outmsg != "" {
					if this.beep {
						outmsg += "\a"
					}

					this.Ui.Output(outmsg)
				}
			}

			total++
			bytesN += int64(len(msg.Value))

			if this.limit > 0 && total >= this.limit {
				break LOOP
			}
			if this.lastN > 0 && total >= int(this.lastN) {
				break LOOP
			}
		}
	}

	return
}
func (this *Topology) displayZoneTopology(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())

	// {cluster: {host: brokerHostInfo}}
	brokerInstances := make(map[string]map[string]*brokerHostInfo)

	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		if len(liveBrokers) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}
		if this.cluster != "" && this.cluster != cluster {
			return
		}

		brokerInstances[cluster] = make(map[string]*brokerHostInfo)

		for _, broker := range liveBrokers {
			if !patternMatched(broker.Host, this.hostPattern) {
				continue
			}

			if _, present := brokerInstances[cluster][broker.Host]; !present {
				brokerInstances[cluster][broker.Host] = newBrokerHostInfo()
			}
			brokerInstances[cluster][broker.Host].addPort(broker.Port, broker.Uptime())
		}

		// find how many partitions a broker is leading
		zkcluster := zkzone.NewCluster(cluster)
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}

		kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
		if err != nil {
			this.Ui.Error(color.Red(" %+v %s", brokerList, err.Error()))
			return
		}

		topics, err := kfk.Topics()
		swallow(err)
		for _, topic := range topics {
			partions, err := kfk.WritablePartitions(topic)
			swallow(err)
			for _, partitionID := range partions {
				leader, err := kfk.Leader(topic, partitionID)
				swallow(err)

				host, _, err := net.SplitHostPort(leader.Addr())
				swallow(err)
				if !patternMatched(host, this.hostPattern) {
					continue
				}

				latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}

				oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}

				brokerInstances[cluster][host].topicMsgs[topic] += (latestOffset - oldestOffset)
				brokerInstances[cluster][host].addTopicPartition(topic, partitionID)
			}
		}
	})

	hosts := make(map[string]struct{})
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		for host := range brokerInstances[zkcluster.Name()] {
			hosts[host] = struct{}{}
		}
	})
	sortedHosts := make([]string, 0)
	for host := range hosts {
		sortedHosts = append(sortedHosts, host)
	}
	sort.Strings(sortedHosts) // sort by host ip

	sortedClusters := make([]string, 0, len(brokerInstances))
	for c := range brokerInstances {
		sortedClusters = append(sortedClusters, c)
	}
	sort.Strings(sortedClusters)

	portN := 0
	hostN := 0
	topicN := 0
	partitionN := 0
	for _, host := range sortedHosts {
		tn := 0
		pn := 0
		mn := int64(0)
		ports := make([]int, 0)
		for _, cluster := range sortedClusters {
			if _, present := brokerInstances[cluster][host]; !present {
				continue
			}

			tn += len(brokerInstances[cluster][host].topicPartitions)
			pn += brokerInstances[cluster][host].leadingPartitions()
			mn += brokerInstances[cluster][host].totalMsgsInStock()
			ports = append(ports, brokerInstances[cluster][host].tcpPorts...)
		}
		portN += len(ports)
		topicN += tn
		partitionN += pn
		hostN += 1

		this.Ui.Output(fmt.Sprintf(" %s leading: %2dT %3dP %15sM ports %2d:%+v",
			color.Green("%15s", host),
			tn,
			pn,
			gofmt.Comma(mn),
			len(ports),
			ports))

		if this.verbose {
			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				for _, tcpPort := range brokerInstances[cluster][host].tcpPorts {
					this.Ui.Output(fmt.Sprintf("%40d %s", tcpPort,
						gofmt.PrettySince(brokerInstances[cluster][host].uptimes[tcpPort])))
				}
			}

			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				this.Ui.Output(color.Magenta("%30s", cluster))
				for topic, partitions := range brokerInstances[cluster][host].topicPartitions {
					this.Ui.Output(fmt.Sprintf("%40s: %15sM P%2d %+v",
						topic,
						gofmt.Comma(brokerInstances[cluster][host].topicMsgs[topic]),
						len(partitions), partitions))
				}
			}
		}
	}

	this.Ui.Output(fmt.Sprintf("%17s host:%d, topic:%d, partition:%d, instance:%d",
		"-TOTAL-", hostN, topicN, partitionN, portN))
}