// Print all controllers of all clusters within a zone.
func (this *Controllers) printControllers(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())
	zkzone.ForSortedControllers(func(cluster string, controller *zk.ControllerMeta) {
		if !patternMatched(cluster, this.cluster) {
			return
		}

		this.Ui.Output(strings.Repeat(" ", 4) + cluster)
		if controller == nil {
			this.Ui.Output(fmt.Sprintf("\t%s", color.Red("empty")))
		} else {
			epochSince := time.Since(controller.Mtime.Time())
			epochSinceStr := gofmt.PrettySince(controller.Mtime.Time())
			if epochSince < time.Hour*2*24 {
				// the controller changed within the last 48h: highlight it
				epochSinceStr = color.Red(epochSinceStr)
			}
			this.Ui.Output(fmt.Sprintf("\t%-2s %21s epoch:%2s/%-20s uptime:%s",
				controller.Broker.Id, controller.Broker.Addr(),
				controller.Epoch, epochSinceStr,
				gofmt.PrettySince(controller.Broker.Uptime())))
		}
	})
}
func (this *Zookeeper) printLeader(zkzone *zk.ZkZone) {
	// FIXME in watch mode only the 1st zone's info is ever shown because this loop blocks the other zones
	for {
		this.Ui.Output(color.Blue(zkzone.Name()))
		for zkhost, lines := range zkzone.RunZkFourLetterCommand("mntr") {
			if this.zkHost != "" && !strings.HasPrefix(zkhost, this.zkHost+":") {
				continue
			}

			parts := strings.Split(lines, "\n")
			for _, l := range parts {
				if strings.HasPrefix(l, "zk_server_state") && strings.HasSuffix(l, "leader") {
					this.Ui.Output(color.Green("%28s", zkhost))
					break
				}
			}
		}

		if this.watchMode {
			time.Sleep(time.Second * 5)
		} else {
			break
		}
	}
}
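// A hedged sample of the per-host "mntr" reply the leader check above parses
// (assumption: stock ZooKeeper, whose mntr output is tab-separated key/value
// lines); only the zk_server_state line matters here:
//
//	zk_version	3.4.6-1569965, built on 02/20/2014 09:09 GMT
//	zk_server_state	leader
//	zk_znode_count	4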
func (this *Clusters) printSummary(zkzone *zk.ZkZone, clusterPattern string, port string) {
	lines := []string{"Zone|Cluster|Brokers|Topics|Partitions|FlatMsg|Cum"}

	type summary struct {
		zone, cluster               string
		brokers, topics, partitions int
		flat, cum                   int64
	}
	summaries := make([]summary, 0, 10)
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		brokers, topics, partitions, flat, cum := this.clusterSummary(zkcluster)
		summaries = append(summaries, summary{zkzone.Name(), zkcluster.Name(),
			brokers, topics, partitions, flat, cum})
	})

	sortutil.DescByField(summaries, "cum")

	var totalFlat, totalCum int64
	for _, s := range summaries {
		lines = append(lines, fmt.Sprintf("%s|%s|%d|%d|%d|%s|%s",
			s.zone, s.cluster, s.brokers, s.topics, s.partitions,
			gofmt.Comma(s.flat), gofmt.Comma(s.cum)))
		totalCum += s.cum
		totalFlat += s.flat
	}

	this.Ui.Output(columnize.SimpleFormat(lines))
	this.Ui.Output(fmt.Sprintf("Flat:%s Cum:%s", gofmt.Comma(totalFlat), gofmt.Comma(totalCum)))
}
func (this *Kateway) generateFlameGraph(zkzone *zk.ZkZone) {
	kws, _ := zkzone.KatewayInfos()
	for _, kw := range kws {
		if kw.Id != this.id {
			continue
		}

		pprofAddr := kw.DebugAddr
		if len(pprofAddr) > 0 && pprofAddr[0] == ':' {
			pprofAddr = kw.Ip + pprofAddr
		}
		pprofAddr = fmt.Sprintf("http://%s/debug/pprof/profile", pprofAddr)

		cmd := pipestream.New(os.Getenv("GOPATH")+"/bin/go-torch", "-u", pprofAddr)
		err := cmd.Open()
		swallow(err)

		scanner := bufio.NewScanner(cmd.Reader())
		scanner.Split(bufio.ScanLines)
		for scanner.Scan() {
			fmt.Println(scanner.Text())
		}
		cmd.Close() // close explicitly rather than defer: we are inside a loop

		this.Ui.Output("torch.svg generated")
	}
}
func (this *Kateway) runSub(zkzone *zk.ZkZone) {
	zone := ctx.Zone(zkzone.Name())
	cf := api.DefaultConfig(zone.SmokeApp, zone.SmokeSecret)
	cf.Sub.Endpoint = zone.SubEndpoint
	cli := api.NewClient(cf)

	t1 := time.Now()
	err := cli.Sub(api.SubOption{
		AppId:     zone.SmokeHisApp,
		Topic:     zone.SmokeTopic,
		Ver:       zone.SmokeTopicVersion,
		Group:     zone.SmokeGroup,
		AutoClose: false,
	}, func(statusCode int, subMsg []byte) error {
		now := time.Now()
		var e error
		if statusCode != http.StatusOK {
			e = fmt.Errorf("unexpected http status: %s, body:%s",
				http.StatusText(statusCode), string(subMsg))
		}
		this.Ui.Output(fmt.Sprintf("-> %s: %s %v", now.Sub(t1), string(subMsg), e))

		time.Sleep(time.Millisecond * 100)
		t1 = time.Now()
		return e
	})
	if err != nil {
		this.Ui.Error(err.Error())
	}
}
func (this *Kateway) installGuide(zkzone *zk.ZkZone) {
	this.Ui.Output(color.Red("manager db GRANT access rights to this ip"))
	this.Ui.Output(color.Red("gk deploy -kfkonly"))
	this.Ui.Output("")
	this.Ui.Output("mkdir -p /var/wd/kateway/sbin")
	this.Ui.Output("cd /var/wd/kateway")

	kateways, err := zkzone.KatewayInfos()
	swallow(err)

	// allocate the next unused kateway id: max of the existing ids, plus one
	nextId := 1
	for _, kw := range kateways {
		id, _ := strconv.Atoi(kw.Id)
		if nextId < id {
			nextId = id
		}
	}
	nextId++

	zone := ctx.Zone(this.zone)
	influxAddr := zone.InfluxAddr
	if influxAddr != "" && !strings.HasPrefix(influxAddr, "http://") {
		influxAddr = "http://" + influxAddr
	}
	var influxInfo string
	if influxAddr != "" {
		influxInfo = "-influxdbaddr " + influxAddr
	}

	this.Ui.Output(fmt.Sprintf(`nohup ./sbin/kateway -zone prod -id %d -debughttp ":10194" -level trace -log kateway.log -crashlog panic %s &`,
		nextId, influxInfo))
	this.Ui.Output("")

	this.Ui.Output("yum install -y logstash")
	this.Ui.Output("/etc/logstash/conf.d/kateway.conf")
	this.Ui.Output(strings.TrimSpace(fmt.Sprintf(`
input {
    file {
        path => "/var/wd/kateway/kateway.log"
        type => "kateway"
    }

    file {
        path => "/var/wd/kateway/panic"
        type => "kateway_panic"
    }
}

output {
    kafka {
        bootstrap_servers => "%s:11003,%s:11003"
        topic_id => "pubsub_log"
    }
}
`, color.Red("k11003a.mycorp.kfk.com"), color.Red("k11003b.mycorp.kfk.com"))))
	this.Ui.Output("")
	this.Ui.Output("chkconfig --add logstash")
	this.Ui.Output("/etc/init.d/logstash start")
}
func printSwallowedErrors(ui cli.Ui, zkzone *zk.ZkZone) {
	errs := zkzone.Errors()
	if len(errs) == 0 {
		return
	}

	for _, e := range errs {
		ui.Error(color.Red("%v", e))
	}
}
func (this *Topology) displayZoneMaxPort(zkzone *zk.ZkZone) {
	maxPort := 0
	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		for _, broker := range liveBrokers {
			if maxPort < broker.Port {
				maxPort = broker.Port
			}
		}
	})

	this.Ui.Output(fmt.Sprintf("max port in zone[%s]: %d", zkzone.Name(), maxPort))
}
func (this *Brokers) maxBrokerId(zkzone *zk.ZkZone, clusterName string) int {
	var maxBrokerId int
	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		if cluster == clusterName {
			for _, b := range liveBrokers {
				id, _ := strconv.Atoi(b.Id)
				if id > maxBrokerId {
					maxBrokerId = id
				}
			}
		}
	})
	return maxBrokerId
}
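// Usage sketch for maxBrokerId (the caller and cluster name are hypothetical):
// when registering a new broker, the next free id is the current max plus one.
//
//	nextBrokerId := this.maxBrokerId(zkzone, "trade") + 1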
func (this *Kateway) runBenchmark(zkzone *zk.ZkZone) {
	this.Ui.Info(fmt.Sprintf("benchmark[%s] zone[%s] %s.%s.%s %s",
		this.benchId, zkzone.Name(),
		this.benchApp, this.benchTopic, this.benchVer, this.benchPubEndpoint))
	yes, _ := this.Ui.Ask("Are you sure you want to run the benchmark? [Y/N]")
	if yes == "Y" {
		log.SetOutput(os.Stdout)
		stress.Flags.Round = 5
		stress.Flags.Tick = 5
		if this.benchmarkMaster != "" {
			stress.Flags.MasterAddr = stress.MasterAddr(this.benchmarkMaster)
		}
		stress.RunStress(this.benchPub)
	}
}
func (this *Kateway) runPub(zkzone *zk.ZkZone) {
	zone := ctx.Zone(zkzone.Name())
	cf := api.DefaultConfig(zone.SmokeApp, zone.SmokeSecret)
	cf.Pub.Endpoint = zone.PubEndpoint
	cli := api.NewClient(cf)

	for {
		now := time.Now()
		pubMsg := fmt.Sprintf("gk kateway -pub smoke test msg: [%s]", now)
		err := cli.Pub("", []byte(pubMsg), api.PubOption{
			Topic: zone.SmokeTopic,
			Ver:   zone.SmokeTopicVersion,
		})
		this.Ui.Output(fmt.Sprintf("<- %s: %s %v", time.Since(now), pubMsg, err))

		time.Sleep(time.Millisecond * 100)
	}
}
func (this *Zookeeper) printZkStats(zkzone *zk.ZkZone) {
	for {
		this.Ui.Output(color.Blue(zkzone.Name()))
		for zkhost, lines := range zkzone.RunZkFourLetterCommand(this.flw) {
			if this.zkHost != "" && !strings.HasPrefix(zkhost, this.zkHost+":") {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%s\n%s", color.Green("%28s", zkhost), lines))
		}

		if this.watchMode {
			time.Sleep(time.Second * 5)
		} else {
			break
		}
	}
}
func (this *Clusters) printRegisteredBrokers(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		info := zkcluster.RegisteredInfo()
		this.Ui.Output(fmt.Sprintf(" %s(%s)", info.Name(), info.Nickname))
		registeredBrokers := info.Roster
		if len(registeredBrokers) == 0 {
			this.Ui.Warn(" brokers not registered")
		} else {
			for _, b := range registeredBrokers {
				if this.ipInNumber {
					this.Ui.Output(fmt.Sprintf(" %2d %s", b.Id, b.Addr()))
				} else {
					this.Ui.Output(fmt.Sprintf(" %2d %s", b.Id, b.NamedAddr()))
				}
			}
		}
	})
}
func (this *Zktop) displayZoneTop(zkzone *zk.ZkZone) {
	if this.batchMode {
		this.Ui.Output(fmt.Sprintf("%s %s", zkzone.Name(), bjtime.NowBj()))
	} else {
		this.Ui.Output(color.Green(zkzone.Name()))
	}

	// header padded to match the row format string below
	header := "VER             SERVER           PORT M  OUTST            RECVD             SENT CONNS  ZNODES LAT(MIN/AVG/MAX)"
	this.Ui.Output(header)

	stats := zkzone.RunZkFourLetterCommand("stat")
	sortedHosts := make([]string, 0, len(stats))
	for hp := range stats {
		sortedHosts = append(sortedHosts, hp)
	}
	sort.Strings(sortedHosts)

	for _, hostPort := range sortedHosts {
		host, port, err := net.SplitHostPort(hostPort)
		if err != nil {
			panic(err)
		}

		stat := zk.ParseStatResult(stats[hostPort])
		if stat.Mode == "" {
			if this.batchMode {
				stat.Mode = "E"
			} else {
				stat.Mode = color.Red("E")
			}
		} else if stat.Mode == "L" && !this.batchMode {
			stat.Mode = color.Blue(stat.Mode)
		}

		// derive QPS from the delta against the previous refresh
		var sentQps, recvQps int
		if lastRecv, present := this.lastRecvs[hostPort]; present {
			r1, _ := strconv.Atoi(stat.Received)
			r0, _ := strconv.Atoi(lastRecv)
			recvQps = (r1 - r0) / int(this.refreshInterval.Seconds())

			s1, _ := strconv.Atoi(stat.Sent)
			s0, _ := strconv.Atoi(this.lastSents[hostPort])
			sentQps = (s1 - s0) / int(this.refreshInterval.Seconds())
		}

		this.Ui.Output(fmt.Sprintf("%-15s %-15s %5s %1s %6s %16s %16s %5s %7s %s",
			stat.Version,     // 15
			host,             // 15
			port,             // 5
			stat.Mode,        // 1
			stat.Outstanding, // 6
			fmt.Sprintf("%s/%d", stat.Received, recvQps), // 16
			fmt.Sprintf("%s/%d", stat.Sent, sentQps),     // 16
			stat.Connections, // 5
			stat.Znodes,      // 7
			stat.Latency,
		))

		this.lastRecvs[hostPort] = stat.Received
		this.lastSents[hostPort] = stat.Sent
	}
}
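// Worked example of the QPS arithmetic in displayZoneTop (numbers invented):
// with refreshInterval=5s and stat.Received going 1000 -> 1600 between two
// refreshes, recvQps = (1600-1000)/5 = 120. The first refresh of a host always
// shows 0 qps because lastRecvs holds no baseline yet.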
func (this *Consumers) printConsumersByHost(zkzone *zk.ZkZone, clusterPattern string) {
	outputs := make(map[string]map[string]map[string]int) // host: {cluster: {topic: count}}

	this.Ui.Output(color.Blue(zkzone.Name()))

	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		consumerGroups := zkcluster.ConsumerGroups()
		for _, group := range consumerGroups {
			for _, c := range group {
				if _, present := outputs[c.Host()]; !present {
					outputs[c.Host()] = make(map[string]map[string]int)
				}
				if _, present := outputs[c.Host()][zkcluster.Name()]; !present {
					outputs[c.Host()][zkcluster.Name()] = make(map[string]int)
				}

				for topic, count := range c.Subscription {
					outputs[c.Host()][zkcluster.Name()][topic] += count
				}
			}
		}
	})

	sortedHosts := make([]string, 0, len(outputs))
	for host := range outputs {
		sortedHosts = append(sortedHosts, host)
	}
	sort.Strings(sortedHosts)

	for _, host := range sortedHosts {
		tc := outputs[host]
		this.Ui.Output(fmt.Sprintf("%s %+v", color.Green("%22s", host), tc))
	}
}
func (this *Topics) printSummary(zkzone *zk.ZkZone, clusterPattern string) {
	lines := []string{"Zone|Cluster|Topic|Partitions|FlatMsg|Cum"}

	var totalFlat, totalCum int64
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		summaries := this.clusterSummary(zkcluster)
		sortutil.DescByField(summaries, "cum")
		for _, s := range summaries {
			lines = append(lines, fmt.Sprintf("%s|%s|%s|%d|%s|%s",
				s.zone, s.cluster, s.topic, s.partitions,
				gofmt.Comma(s.flat), gofmt.Comma(s.cum)))
			totalCum += s.cum
			totalFlat += s.flat
		}
	})

	this.Ui.Output(columnize.SimpleFormat(lines))
	this.Ui.Output(fmt.Sprintf("Flat:%s Cum:%s", gofmt.Comma(totalFlat), gofmt.Comma(totalCum)))
}
func (this *Clusters) verifyBrokers(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())
	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		zkcluster := zkzone.NewCluster(cluster)
		registeredBrokers := zkcluster.RegisteredInfo().Roster

		// diff liveBrokers against registeredBrokers, in both directions

		// loop1: live brokers that are missing from the registered roster
		for _, broker := range liveBrokers {
			foundInRoster := false
			for _, b := range registeredBrokers {
				bid := strconv.Itoa(b.Id)
				if bid == broker.Id && broker.Addr() == b.Addr() {
					foundInRoster = true
					break
				}
			}

			if !foundInRoster {
				// the broker should be manually registered
				this.Ui.Output(strings.Repeat(" ", 4) +
					color.Green("+ gk clusters -z %s -s -c %s -addbroker %s:%s",
						zkzone.Name(), cluster, broker.Id, broker.Addr()))
			}
		}

		// loop2: registered brokers that are no longer alive
		for _, b := range registeredBrokers {
			foundInLive := false
			for _, broker := range liveBrokers {
				bid := strconv.Itoa(b.Id)
				if bid == broker.Id && broker.Addr() == b.Addr() {
					foundInLive = true
					break
				}
			}

			if !foundInLive {
				// the broker is dead
				this.Ui.Output(strings.Repeat(" ", 4) +
					color.Red("cluster[%s] broker[%d] %s is dead", cluster, b.Id, b.Addr()))
			}
		}
	})
}
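// Hedged sample output of verifyBrokers (zone, cluster and addresses are
// invented): broker 5 is alive but unregistered, broker 2 is registered but
// gone.
//
//	prod
//	    + gk clusters -z prod -s -c trade -addbroker 5:10.1.1.5:11005
//	    cluster[trade] broker[2] 10.1.1.2:11005 is dead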
func (this *Discover) discoverClusters(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())

	existingClusters := zkzone.Clusters()
	existingClusterPaths := make(map[string]struct{}, len(existingClusters))
	for _, path := range existingClusters {
		existingClusterPaths[path] = struct{}{}
	}

	discoveredClusters, err := zkzone.DiscoverClusters("/")
	if err != nil {
		this.Ui.Error(zkzone.Name() + ": " + err.Error())
		return
	}

	// print each discovered cluster's state: new, or already registered
	for _, zkpath := range discoveredClusters {
		if _, present := existingClusterPaths[zkpath]; !present {
			this.Ui.Output(strings.Repeat(" ", 4) + color.Green("%s +++", zkpath))
		} else {
			this.Ui.Output(strings.Repeat(" ", 4) + zkpath)
		}
	}

	// find the offline clusters
	for c, path := range existingClusters {
		path = strings.TrimSpace(path)
		foundOnline := false
		for _, p := range discoveredClusters {
			p = strings.TrimSpace(p)
			if p == path {
				foundOnline = true
				break
			}
		}
		if !foundOnline {
			this.Ui.Output(strings.Repeat(" ", 4) + color.Red("%s: %s ---", c, path))
		}
	}
}
func (this *Brokers) displayZoneBrokers(zkzone *zk.ZkZone) {
	lines := make([]string, 0)
	header := "Zone|Cluster|Id|Broker|Uptime"
	lines = append(lines, header)

	n := 0
	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		outputs := this.clusterBrokers(zkzone.Name(), cluster, liveBrokers)
		n += len(outputs)
		lines = append(lines, outputs...)
	})
	if this.staleOnly {
		this.Ui.Info(fmt.Sprintf("%d problematic brokers in zone[%s]", n, zkzone.Name()))
	} else {
		this.Ui.Info(fmt.Sprintf("%d brokers in zone[%s]", n, zkzone.Name()))
	}

	if len(lines) > 1 {
		// lines holds more than just the header
		this.Ui.Output(columnize.SimpleFormat(lines))
	}
}
func New(zkzone *zk.ZkZone, listenAddr string, managerType string) Controller {
	// mysql cluster config
	b, err := zkzone.KatewayJobClusterConfig()
	if err != nil {
		panic(err)
	}
	var mcc = &config.ConfigMysql{}
	if err = mcc.From(b); err != nil {
		panic(err)
	}

	this := &controller{
		quiting:      make(chan struct{}),
		orchestrator: zkzone.NewOrchestrator(),
		mc:           mysql.New(mcc),
		ListenAddr:   listenAddr,
		Version:      gafka.BuildId,
	}
	this.ident, err = this.generateIdent()
	if err != nil {
		panic(err)
	}

	// ident is of the form hostname:95f333fb-731c-9c95-c598-8d6b99a9ec7d;
	// shortId keeps the hostname plus the last uuid segment
	p := strings.SplitN(this.ident, ":", 2)
	this.shortId = fmt.Sprintf("%s:%s", p[0], this.ident[strings.LastIndexByte(this.ident, '-')+1:])
	this.setupAuditor()

	switch managerType {
	case "mysql":
		cf := mmysql.DefaultConfig(zkzone.Name())
		cf.Refresh = time.Minute * 5
		manager.Default = mmysql.New(cf)

	case "dummy":
		manager.Default = mdummy.New("")

	default:
		panic("unknown manager: " + managerType)
	}

	return this
}
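// Wiring sketch for New (a minimal sketch: the zone name and listen address
// are made up, and how the returned Controller is started lives elsewhere, so
// the final call is an assumption):
//
//	zkzone := zk.NewZkZone(zk.DefaultConfig("prod", ctx.ZoneZkAddrs("prod")))
//	ctrl := New(zkzone, ":10085", "mysql")
//	// ctrl.RunForever() // or whatever entry point the Controller interface exposes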
func (this *Clusters) printClusters(zkzone *zk.ZkZone, clusterPattern string, port string) {
	if this.registeredBrokers {
		this.printRegisteredBrokers(zkzone)
		return
	}

	type clusterInfo struct {
		name, path         string
		nickname           string
		topicN, partitionN int
		err                string
		priority           int
		public             bool
		retention          int
		replicas           int
		brokerInfos        []zk.BrokerInfo
	}
	clusters := make([]clusterInfo, 0)
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		ci := clusterInfo{
			name: zkcluster.Name(),
			path: zkcluster.Chroot(),
		}
		if this.neat {
			clusters = append(clusters, ci)
			return
		}

		// verbose mode: count topics and partitions
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			ci.err = "no live brokers"
			clusters = append(clusters, ci)
			return
		}

		if port != "" {
			for _, hostport := range brokerList {
				_, p, err := net.SplitHostPort(hostport)
				swallow(err)

				if p != port {
					return
				}
			}
		}

		info := zkcluster.RegisteredInfo()
		if this.publicOnly && !info.Public {
			return
		}

		if !this.verbose {
			ci.brokerInfos = info.Roster
			clusters = append(clusters, ci)
			return
		}

		kfk, err := sarama.NewClient(brokerList, saramaConfig())
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}
		defer kfk.Close() // was missing: avoid leaking a client per cluster

		topics, err := kfk.Topics()
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}

		partitionN := 0
		for _, topic := range topics {
			partitions, err := kfk.Partitions(topic)
			if err != nil {
				ci.err = err.Error()
				clusters = append(clusters, ci)
				continue
			}

			partitionN += len(partitions)
		}

		clusters = append(clusters, clusterInfo{
			name:        zkcluster.Name(),
			nickname:    info.Nickname,
			path:        zkcluster.Chroot(),
			topicN:      len(topics),
			partitionN:  partitionN,
			retention:   info.Retention,
			public:      info.Public,
			replicas:    info.Replicas,
			priority:    info.Priority,
			brokerInfos: info.Roster,
		})
	})

	this.Ui.Output(fmt.Sprintf("%s: %d", zkzone.Name(), len(clusters)))
	if this.verbose {
		// two passes: 1. print the clusters with errors 2. print the good clusters
		for _, c := range clusters {
			if c.err == "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s %s", c.name, c.path, color.Red(c.err)))
		}

		// pass 2: the good clusters
		for _, c := range clusters {
			if c.err != "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))
			brokers := []string{}
			for _, broker := range c.brokerInfos {
				if this.ipInNumber {
					brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
				} else {
					brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
				}
			}
			if len(brokers) > 0 {
				sort.Strings(brokers)
				this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
			}

			this.Ui.Output(strings.Repeat(" ", 4) +
				color.Green("nick:%s public:%v topics:%d partitions:%d replicas:%d retention:%dh",
					c.nickname, c.public, c.topicN, c.partitionN, c.replicas, c.retention))
		}

		return
	}

	// non-verbose mode
	hostsWithoutDnsRecords := make([]string, 0)
	for _, c := range clusters {
		this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))
		brokers := []string{}
		for _, broker := range c.brokerInfos {
			if this.ipInNumber {
				brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
			} else {
				brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
			}

			if broker.Addr() == broker.NamedAddr() {
				// no DNS record resolves for this broker's address
				hostsWithoutDnsRecords = append(hostsWithoutDnsRecords, fmt.Sprintf("%s:%s", c.name, broker.Addr()))
			}
		}

		if len(brokers) > 0 {
			sort.Strings(brokers)
			this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
		} else {
			this.Ui.Warn(fmt.Sprintf("%31s no live registered brokers", " "))
		}
	}

	if len(hostsWithoutDnsRecords) > 0 {
		this.Ui.Warn("brokers without dns record:")
		for _, broker := range hostsWithoutDnsRecords {
			parts := strings.SplitN(broker, ":", 2)
			this.Ui.Output(fmt.Sprintf("%30s: %s", parts[0], color.Yellow(parts[1])))
		}
	}
}
func (this *Consumers) cleanupStaleConsumerGroups(zkzone *zk.ZkZone, clusterPattern string) {
	// what consumer groups are safe to delete?
	// 1. not online
	// 2. have no offsets
	this.Ui.Output(color.Blue(zkzone.Name()))

	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		this.Ui.Output(strings.Repeat(" ", 4) + zkcluster.Name())
		consumerGroups := zkcluster.ConsumerGroups()
		for group, consumers := range consumerGroups {
			if len(consumers) > 0 {
				// this consumer group is online
				continue
			}

			if !patternMatched(group, this.groupPattern) {
				continue
			}

			if !strings.HasPrefix(group, "console-consumer-") {
				path := zkcluster.ConsumerGroupOffsetPath(group)
				_, _, err := zkzone.Conn().Children(path)
				if err == nil {
					this.Ui.Warn(fmt.Sprintf("%s not empty, unsafe to cleanup", path))
					continue
				}

				if err != gozk.ErrNoNode {
					// should never happen
					swallow(err)
				}
			}

			// have no offsets, safe to delete
			if this.confirmYes {
				yes, err := this.Ui.Ask(fmt.Sprintf("confirm to remove cluster[%s] consumer group: %s? [Y/n]",
					zkcluster.Name(), group))
				swallow(err)

				if strings.ToLower(yes) == "n" {
					this.Ui.Info(fmt.Sprintf("%s skipped", group))
					continue
				}
			} else {
				yes, err := this.Ui.Ask(fmt.Sprintf("confirm to remove cluster[%s] consumer group: %s? [y/N]",
					zkcluster.Name(), group))
				swallow(err)

				if strings.ToLower(yes) != "y" {
					this.Ui.Info(fmt.Sprintf("%s skipped", group))
					continue
				}
			}

			// delete this consumer group
			zkzone.DeleteRecursive(zkcluster.ConsumerGroupRoot(group))
			this.Ui.Info(fmt.Sprintf("%s deleted", group))
		}
	})
}
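// Hedged sketch of the ZooKeeper layout cleanupStaleConsumerGroups consults
// (the classic Kafka 0.8 high-level consumer registry; cluster chroot omitted):
//
//	/consumers/<group>/ids      ephemeral children -> the group is online
//	/consumers/<group>/offsets  node exists        -> offsets committed, keep it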
func (this *Redis) runPing(zkzone *zk.ZkZone) {
	var wg sync.WaitGroup
	allRedis := zkzone.AllRedis()
	this.topInfos = make([]redisTopInfo, 0, len(allRedis))

	for _, hostPort := range allRedis {
		host, port, err := net.SplitHostPort(hostPort)
		if err != nil {
			this.Ui.Error(hostPort)
			continue
		}

		nport, err := strconv.Atoi(port)
		if err != nil || nport < 0 {
			this.Ui.Error(hostPort)
			continue
		}

		wg.Add(1)
		go func(wg *sync.WaitGroup, host string, port int) {
			defer wg.Done()

			t0 := time.Now()

			spec := redis.DefaultSpec().Host(host).Port(port)
			client, err := redis.NewSynchClientWithSpec(spec)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("[%s:%d] %v", host, port, err))
				return
			}
			defer client.Quit()

			if err := client.Ping(); err != nil {
				this.Ui.Error(fmt.Sprintf("[%s:%d] %v", host, port, err))
				return
			}

			latency := time.Since(t0)

			this.mu.Lock()
			this.topInfos = append(this.topInfos, redisTopInfo{
				host:    host,
				port:    port,
				t0:      t0,
				latency: latency,
			})
			this.mu.Unlock()
		}(&wg, host, nport)
	}
	wg.Wait()

	latency := metrics.NewRegisteredHistogram("redis.latency", metrics.DefaultRegistry,
		metrics.NewExpDecaySample(1028, 0.015))

	sortutil.AscByField(this.topInfos, "latency")
	lines := []string{"#|Host|Port|latency"}
	if this.debug {
		lines = []string{"#|Host|Port|StartedAt|latency"}
	}
	for i, info := range this.topInfos {
		latency.Update(info.latency.Nanoseconds() / 1e6)

		if this.debug {
			lines = append(lines, fmt.Sprintf("%4d|%s|%d|%s|%s",
				i+1, info.host, info.port, info.t0, info.latency))
		} else {
			lines = append(lines, fmt.Sprintf("%4d|%s|%d|%s",
				i+1, info.host, info.port, info.latency))
		}
	}
	this.Ui.Output(columnize.SimpleFormat(lines))

	// summary: request only the four percentiles actually printed
	ps := latency.Percentiles([]float64{0.7, 0.90, 0.95, 0.99})
	this.Ui.Info(fmt.Sprintf("N:%d Min:%dms Max:%dms Mean:%.1fms 70%%:%.1fms 90%%:%.1fms 95%%:%.1fms 99%%:%.1fms",
		latency.Count(), latency.Min(), latency.Max(), latency.Mean(),
		ps[0], ps[1], ps[2], ps[3]))
}
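// Note on the histogram in runPing: NewExpDecaySample(1028, 0.015) is the
// go-metrics convention (reservoir size 1028, alpha 0.015), which biases the
// sample toward roughly the last five minutes of measurements. A hedged sample
// of the summary line (all values invented):
//
//	N:120 Min:0ms Max:41ms Mean:3.2ms 70%:2.1ms 90%:6.0ms 95%:9.8ms 99%:30.5ms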
func (this *Redis) runTop(zkzone *zk.ZkZone, interval time.Duration) {
	termui.Init()
	this.mainScreen = true
	this.w, this.h = termbox.Size()
	this.rows = this.h - 3 // head, max/total
	if this.batchMode {
		termbox.Close()
	} else {
		defer termui.Close()

		termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
		eventChan := make(chan termbox.Event, 16)
		go this.handleEvents(eventChan)
		go func() {
			for {
				ev := termbox.PollEvent()
				eventChan <- ev
			}
		}()

		this.drawSplash()
	}

	this.topInfos = make([]redisTopInfo, 0, 100)
	this.topInfos1 = make([]redisTopInfo, 0, 100)
	for {
		var wg sync.WaitGroup

		this.mu.Lock()
		this.topInfos1 = this.topInfos1[:0]
		freezedPorts := make(map[string]struct{})
		// clone freezedPorts to avoid concurrent map access
		if len(this.freezedPorts) > 0 {
			for port := range this.freezedPorts {
				freezedPorts[port] = struct{}{}
			}
		}
		this.mu.Unlock()

		for _, hostPort := range zkzone.AllRedis() {
			host, port, err := net.SplitHostPort(hostPort)
			if err != nil {
				log.Error("invalid redis instance: %s", hostPort)
				continue
			}

			if len(this.ports) > 0 {
				if _, present := this.ports[port]; !present {
					continue
				}
			}
			if len(freezedPorts) > 0 {
				if _, present := freezedPorts[port]; !present {
					continue
				}
			}

			nport, err := strconv.Atoi(port)
			if err != nil || nport < 0 {
				log.Error("invalid redis instance: %s", hostPort)
				continue
			}

			wg.Add(1)
			go this.updateRedisInfo(&wg, host, nport) // updates happen on topInfos1
		}
		wg.Wait()

		this.mu.Lock()
		this.topInfos1, this.topInfos = this.topInfos, this.topInfos1
		this.mu.Unlock()

		if this.mainScreen {
			this.render()
		}

		select {
		case <-time.After(interval):
		case <-this.quit:
			return
		}
	}
}
func (this *Topology) displayZoneTopology(zkzone *zk.ZkZone) {
	this.Ui.Output(zkzone.Name())

	// {cluster: {host: brokerHostInfo}}
	brokerInstances := make(map[string]map[string]*brokerHostInfo)

	zkzone.ForSortedBrokers(func(cluster string, liveBrokers map[string]*zk.BrokerZnode) {
		if len(liveBrokers) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}
		if this.cluster != "" && this.cluster != cluster {
			return
		}

		brokerInstances[cluster] = make(map[string]*brokerHostInfo)
		for _, broker := range liveBrokers {
			if !patternMatched(broker.Host, this.hostPattern) {
				continue
			}

			if _, present := brokerInstances[cluster][broker.Host]; !present {
				brokerInstances[cluster][broker.Host] = newBrokerHostInfo()
			}
			brokerInstances[cluster][broker.Host].addPort(broker.Port, broker.Uptime())
		}

		// find how many partitions each broker is leading
		zkcluster := zkzone.NewCluster(cluster)
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			this.Ui.Warn(fmt.Sprintf("empty brokers in cluster[%s]", cluster))
			return
		}

		kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
		if err != nil {
			this.Ui.Error(color.Red(" %+v %s", brokerList, err.Error()))
			return
		}

		topics, err := kfk.Topics()
		swallow(err)
		for _, topic := range topics {
			partitions, err := kfk.WritablePartitions(topic)
			swallow(err)

			for _, partitionID := range partitions {
				leader, err := kfk.Leader(topic, partitionID)
				swallow(err)

				host, _, err := net.SplitHostPort(leader.Addr())
				swallow(err)
				if !patternMatched(host, this.hostPattern) {
					continue
				}

				latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}
				oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
				if err != nil {
					this.Ui.Error(fmt.Sprintf("%s %s %v", cluster, topic, err))
					continue
				}

				brokerInstances[cluster][host].topicMsgs[topic] += (latestOffset - oldestOffset)
				brokerInstances[cluster][host].addTopicPartition(topic, partitionID)
			}
		}
	})

	hosts := make(map[string]struct{})
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		for host := range brokerInstances[zkcluster.Name()] {
			hosts[host] = struct{}{}
		}
	})
	sortedHosts := make([]string, 0)
	for host := range hosts {
		sortedHosts = append(sortedHosts, host)
	}
	sort.Strings(sortedHosts) // sort by host ip

	sortedClusters := make([]string, 0, len(brokerInstances))
	for c := range brokerInstances {
		sortedClusters = append(sortedClusters, c)
	}
	sort.Strings(sortedClusters)

	portN := 0
	hostN := 0
	topicN := 0
	partitionN := 0
	for _, host := range sortedHosts {
		tn := 0
		pn := 0
		mn := int64(0)
		ports := make([]int, 0)
		for _, cluster := range sortedClusters {
			if _, present := brokerInstances[cluster][host]; !present {
				continue
			}

			tn += len(brokerInstances[cluster][host].topicPartitions)
			pn += brokerInstances[cluster][host].leadingPartitions()
			mn += brokerInstances[cluster][host].totalMsgsInStock()
			ports = append(ports, brokerInstances[cluster][host].tcpPorts...)
		}
		portN += len(ports)
		topicN += tn
		partitionN += pn
		hostN += 1

		this.Ui.Output(fmt.Sprintf(" %s leading: %2dT %3dP %15sM ports %2d:%+v",
			color.Green("%15s", host), tn, pn, gofmt.Comma(mn), len(ports), ports))
		if this.verbose {
			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				for _, tcpPort := range brokerInstances[cluster][host].tcpPorts {
					this.Ui.Output(fmt.Sprintf("%40d %s", tcpPort,
						gofmt.PrettySince(brokerInstances[cluster][host].uptimes[tcpPort])))
				}
			}

			for _, cluster := range sortedClusters {
				if _, present := brokerInstances[cluster][host]; !present {
					continue
				}

				this.Ui.Output(color.Magenta("%30s", cluster))
				for topic, partitions := range brokerInstances[cluster][host].topicPartitions {
					this.Ui.Output(fmt.Sprintf("%40s: %15sM P%2d %+v", topic,
						gofmt.Comma(brokerInstances[cluster][host].topicMsgs[topic]),
						len(partitions), partitions))
				}
			}
		}
	}

	this.Ui.Output(fmt.Sprintf("%17s host:%d, topic:%d, partition:%d, instance:%d",
		"-TOTAL-", hostN, topicN, partitionN, portN))
}
func (this *Consumers) printConsumersByGroupTable(zkzone *zk.ZkZone, clusterPattern string) {
	lines := make([]string, 0)
	header := "Zone|Cluster|M|Host|ConsumerGroup|Topic/Partition|Offset|Uptime"
	lines = append(lines, header)

	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		groupTopicsMap := make(map[string]map[string]struct{}) // group: subscribed topics

		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		consumerGroups := zkcluster.ConsumerGroups()
		sortedGroups := make([]string, 0, len(consumerGroups))
		for group := range consumerGroups {
			if !patternMatched(group, this.groupPattern) {
				continue
			}

			sortedGroups = append(sortedGroups, group)
		}
		sort.Strings(sortedGroups)

		for _, group := range sortedGroups {
			consumers := consumerGroups[group]
			if this.onlineOnly && len(consumers) == 0 {
				continue
			}

			if len(consumers) > 0 {
				// the group is online; sort its consumers by id
				sortedIds := make([]string, 0)
				consumersMap := make(map[string]*zk.ConsumerZnode)
				for _, c := range consumers {
					sortedIds = append(sortedIds, c.Id)
					consumersMap[c.Id] = c
				}
				sort.Strings(sortedIds)

				for _, consumerId := range sortedIds {
					c := consumersMap[consumerId]
					for topic := range c.Subscription {
						if !patternMatched(topic, this.topicPattern) {
							continue
						}

						if len(groupTopicsMap[group]) == 0 {
							groupTopicsMap[group] = make(map[string]struct{}, 5)
						}
						groupTopicsMap[group][topic] = struct{}{}

						ownerByPartition := zkcluster.OwnersOfGroupByTopic(group, topic)

						partitionsWithOffset := make(map[string]struct{})
						for _, offset := range this.displayGroupOffsets(zkcluster, group, topic, false) {
							onlineSymbol := "◉"
							isOwner := false
							if ownerByPartition[offset.partitionId] == consumerId {
								onlineSymbol += "*" // owned by this consumer
								isOwner = true
							}
							if this.ownerOnly && !isOwner {
								continue
							}

							partitionsWithOffset[offset.partitionId] = struct{}{}

							lines = append(lines,
								fmt.Sprintf("%s|%s|%s|%s|%s|%s|%s|%s",
									zkzone.Name(), zkcluster.Name(),
									onlineSymbol,
									c.Host(),
									group+"@"+c.Id[len(c.Id)-12:],
									fmt.Sprintf("%s/%s", offset.topic, offset.partitionId),
									offset.offset,
									gofmt.PrettySince(c.Uptime())))
						}

						for partitionId := range ownerByPartition {
							if _, present := partitionsWithOffset[partitionId]; !present {
								// the consumer owns this partition and is online, but has committed no offset yet
								onlineSymbol := "◉"
								isOwner := false
								if ownerByPartition[partitionId] == consumerId {
									onlineSymbol += "*"
									isOwner = true
								}
								if this.ownerOnly && !isOwner {
									continue
								}

								lines = append(lines,
									fmt.Sprintf("%s|%s|%s|%s|%s|%s|?|%s",
										zkzone.Name(), zkcluster.Name(),
										onlineSymbol,
										c.Host(),
										group+"@"+c.Id[len(c.Id)-12:],
										fmt.Sprintf("%s/%s", topic, partitionId),
										gofmt.PrettySince(c.Uptime())))
							}
						}
					}
				}
			} else {
				// the group is offline
				for _, offset := range this.displayGroupOffsets(zkcluster, group, "", false) {
					if !patternMatched(offset.topic, this.topicPattern) {
						continue
					}

					lines = append(lines,
						fmt.Sprintf("%s|%s|%s|%s|%s|%s|%s|%s",
							zkzone.Name(), zkcluster.Name(),
							"◎",
							" ",
							group,
							fmt.Sprintf("%s/%s", offset.topic, offset.partitionId),
							offset.offset,
							" "))
				}
			}
		}

		for group, topics := range groupTopicsMap {
			if len(topics) > 1 {
				// the same consumer group subscribes to more than 1 topic
				topicsLabel := make([]string, 0, len(topics))
				for t := range topics {
					topicsLabel = append(topicsLabel, t)
				}
				this.Ui.Warn(fmt.Sprintf("%35s consuming: %+v", group, topicsLabel))
			}
		}
	})

	if !this.warnOnly {
		this.Ui.Output(columnize.SimpleFormat(lines))
	}
}
func (this *Kateway) runCheckup(zkzone *zk.ZkZone) {
	zone := ctx.Zone(zkzone.Name())
	var (
		myApp  = zone.SmokeApp
		hisApp = zone.SmokeHisApp
		secret = zone.SmokeSecret
		ver    = zone.SmokeTopicVersion
		topic  = zone.SmokeTopic
		group  = zone.SmokeGroup
	)
	if myApp == "" || secret == "" {
		this.Ui.Warn(fmt.Sprintf("zone[%s] skipped", zkzone.Name()))
		return
	}

	rand.Seed(time.Now().UTC().UnixNano())

	kws, err := zkzone.KatewayInfos()
	swallow(err)
	if zone.PubEndpoint != "" && zone.SubEndpoint != "" {
		// add the load balancer endpoint
		kws = append(kws, &zk.KatewayMeta{
			Id:      "loadbalancer",
			PubAddr: zone.PubEndpoint,
			SubAddr: zone.SubEndpoint,
		})
	}

	for _, kw := range kws {
		if this.id != "" && kw.Id != this.id {
			continue
		}

		this.Ui.Info(fmt.Sprintf("zone[%s] kateway[%s]", zkzone.Name(), kw.Id))

		// pub a message
		cf := api.DefaultConfig(myApp, secret)
		cf.Pub.Endpoint = kw.PubAddr
		cf.Sub.Endpoint = kw.SubAddr
		cli := api.NewClient(cf)

		pubMsg := fmt.Sprintf("gk smoke test msg: [%s]", time.Now())
		if this.curl {
			this.Ui.Output(fmt.Sprintf(`curl -XPOST -H'Appid: %s' -H'Pubkey: %s' -d '%s' %s`,
				myApp, secret, pubMsg,
				fmt.Sprintf("http://%s/v1/msgs/%s/%s", kw.PubAddr, topic, ver)))
		}

		err = cli.Pub("", []byte(pubMsg), api.PubOption{
			Topic: topic,
			Ver:   ver,
		})
		swallow(err)

		if this.curl {
			this.Ui.Output(fmt.Sprintf(`curl -XGET -H'Appid: %s' -H'Subkey: %s' %s`,
				myApp, secret,
				fmt.Sprintf("http://%s/v1/msgs/%s/%s/%s?group=%s", kw.SubAddr, hisApp, topic, ver, group)))
		}

		// confirm that sub can get the pub'ed message
		err = cli.Sub(api.SubOption{
			AppId:     hisApp,
			Topic:     topic,
			Ver:       ver,
			Group:     group,
			AutoClose: true,
		}, func(statusCode int, subMsg []byte) error {
			if statusCode != http.StatusOK {
				return fmt.Errorf("unexpected http status: %s, body:%s",
					http.StatusText(statusCode), string(subMsg))
			}
			if len(subMsg) < 10 {
				this.Ui.Warn(fmt.Sprintf("unexpected sub msg: %s", string(subMsg)))
			}

			return api.ErrSubStop
		})
		swallow(err)

		this.Ui.Info(fmt.Sprintf(" ok for %s@%s", kw.Id, kw.Build))

		// wait for the server to clean up the sub connection
		time.Sleep(time.Second)

		// TODO:
		// 1. query a pubsub topic's partition count
		// 2. inspect a pubsub topic's produce/consume status
		// 3. pub
		// 4. sub
	}
}