func (this *Partition) addPartition(zkAddrs string, topic string, partitions int) error {
	log.Info("adding partitions to topic: %s", topic)

	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		"--alter",
		fmt.Sprintf("--topic %s", topic),
		fmt.Sprintf("--partitions %d", partitions),
	)
	err := cmd.Open()
	if err != nil {
		return err
	}

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
	err = scanner.Err()
	if err != nil {
		return err
	}

	cmd.Close()

	log.Info("added partitions to topic: %s", topic)
	return nil
}
func (this *Verify) showTable() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"AppId", "PubSub Topic", "Desc", "Kafka Topic", "?"})
	for _, t := range this.topics {
		kafkaTopic := t.KafkaTopicName
		if kafkaTopic == "" {
			// try to find its counterpart in raw kafka
			var q *dbx.Query
			if _, present := this.kafkaTopics[t.TopicName]; present {
				kafkaTopic = "[" + t.TopicName + "]"
				q = this.db.NewQuery(fmt.Sprintf("UPDATE topics SET KafkaTopicName='%s' WHERE TopicId=%s",
					t.TopicName, t.TopicId))
				this.Ui.Output(q.SQL())
			}

			if q != nil && this.confirmed {
				_, err := q.Execute()
				swallow(err)
			}
		}

		problem := "N"
		if _, present := this.problemeticTopics[kafkaTopic]; present {
			problem = color.Yellow("Y")
		}

		table.Append([]string{t.AppId, t.TopicName, t.TopicIntro, kafkaTopic, problem})
	}
	table.Render()
}
func (this *Migrate) executeReassignment() {
	/*
		1. kafka-reassign-partitions.sh writes /admin/reassign_partitions
		2. the controller listens on the path above
		3. for each topic partition, the controller does the following:
		   3.1. start new replicas in RAR - AR (RAR = Reassigned Replicas, AR = original list of Assigned Replicas)
		   3.2. wait until the new replicas are in sync with the leader
		   3.3. if the leader is not in RAR, elect a new leader from RAR
		   3.4. stop the old replicas AR - RAR
		   3.5. write the new AR
		   3.6. remove the partition from the /admin/reassign_partitions path
	*/
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-reassign-partitions.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", this.zkcluster.ZkConnectAddr()),
		fmt.Sprintf("--reassignment-json-file %s", reassignNodeFilename),
		"--execute",
	)
	err := cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
}
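// A minimal sketch (illustration only, not code from this repo) of how the plan
// consumed above via --reassignment-json-file could be produced. The JSON layout
// is Kafka's documented {"version":1,"partitions":[...]} format; the topic name,
// partition id and broker ids below are placeholder values, and writeReassignPlan
// is a hypothetical helper (uses encoding/json and io/ioutil).
type reassignPartition struct {
	Topic     string  `json:"topic"`
	Partition int32   `json:"partition"`
	Replicas  []int32 `json:"replicas"`
}

type reassignPlan struct {
	Version    int                 `json:"version"`
	Partitions []reassignPartition `json:"partitions"`
}

func writeReassignPlan(filename string) error {
	plan := reassignPlan{
		Version: 1,
		Partitions: []reassignPartition{
			{Topic: "demo", Partition: 0, Replicas: []int32{1, 2, 3}},
		},
	}
	b, err := json.Marshal(plan)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, b, 0644)
}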
func (this *Topics) addTopic(zkcluster *zk.ZkCluster, topic string, replicas, partitions int) error {
	this.Ui.Info(fmt.Sprintf("creating kafka topic: %s", topic))

	ts := sla.DefaultSla()
	ts.Partitions = partitions
	ts.Replicas = replicas
	lines, err := zkcluster.AddTopic(topic, ts)
	if err != nil {
		return err
	}

	for _, l := range lines {
		this.Ui.Output(color.Yellow(l))
	}
	if this.ipInNumber {
		this.Ui.Output(fmt.Sprintf("\tzookeeper.connect: %s", zkcluster.ZkConnectAddr()))
		this.Ui.Output(fmt.Sprintf("\t broker.list: %s", strings.Join(zkcluster.BrokerList(), ",")))
	} else {
		this.Ui.Output(fmt.Sprintf("\tzookeeper.connect: %s", zkcluster.NamedZkConnectAddr()))
		this.Ui.Output(fmt.Sprintf("\t broker.list: %s", strings.Join(zkcluster.NamedBrokerList(), ",")))
	}

	return nil
}
func (this *Topics) delTopic(zkcluster *zk.ZkCluster, topic string) error {
	this.Ui.Info(fmt.Sprintf("deleting kafka topic: %s", topic))

	lines, err := zkcluster.DeleteTopic(topic)
	if err != nil {
		return err
	}

	for _, l := range lines {
		this.Ui.Output(color.Yellow(l))
	}

	return nil
}
func (this *Verify) verifyPub() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Kafka", "Stock", "PubSub", "Stock", "Diff", "?"})
	for _, t := range this.topics {
		if t.KafkaTopicName == "" {
			continue
		}

		kafkaCluster := this.kafkaTopics[t.KafkaTopicName]
		if kafkaCluster == "" {
			this.Ui.Warn(fmt.Sprintf("invalid kafka topic: %s", t.KafkaTopicName))
			continue
		}

		psubTopic := manager.Default.KafkaTopic(t.AppId, t.TopicName, "v1")
		offsets := this.pubOffsetDiff(t.KafkaTopicName, kafkaCluster, psubTopic, this.cluster)

		var diff string
		if offsets[0] == 0 && offsets[1] != 0 {
			diff = color.Yellow("%d", offsets[1]-offsets[0])
		} else if math.Abs(float64(offsets[0]-offsets[1])) < 20 {
			diff = color.Green("%d", offsets[1]-offsets[0])
		} else {
			diff = color.Red("%d", offsets[1]-offsets[0])
		}

		problem := "N"
		if _, present := this.problemeticTopics[t.KafkaTopicName]; present {
			problem = color.Yellow("Y")
		}

		table.Append([]string{
			t.KafkaTopicName, fmt.Sprintf("%d", offsets[0]),
			t.TopicName, fmt.Sprintf("%d", offsets[1]),
			diff, problem})
	}
	table.Render()
}
func (this *Rebalance) executeReassignment() {
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-preferred-replica-election.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", this.zkcluster.ZkConnectAddr()),
		fmt.Sprintf("--path-to-json-file %s", preferredReplicaJsonFile),
	)
	err := cmd.Open()
	if err != nil {
		return
	}
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		this.Ui.Output(color.Yellow(scanner.Text()))
	}
}
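// For reference (an assumption based on Kafka's kafka-preferred-replica-election.sh
// documentation, not taken from this repo): the file passed via --path-to-json-file
// above lists the partitions to run the preferred-replica election for, e.g.
//
//	{"partitions": [{"topic": "demo", "partition": 0}, {"topic": "demo", "partition": 1}]}
//
// When the flag is omitted, the tool triggers the election for all partitions.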
func (this *Get) showChildrenRecursively(conn *zk.Conn, path string) {
	children, _, err := conn.Children(path)
	if err != nil {
		return
	}

	sort.Strings(children)
	for _, child := range children {
		if path == "/" {
			path = ""
		}

		znode := fmt.Sprintf("%s/%s", path, child)

		// display znode content
		data, stat, err := conn.Get(znode)
		must(err)
		if stat.EphemeralOwner > 0 {
			if patternMatched(znode, this.likePattern) {
				this.Ui.Output(color.Yellow(znode))
			}
		} else {
			if patternMatched(znode, this.likePattern) {
				this.Ui.Output(color.Green(znode))
			}
		}

		if len(data) > 0 && patternMatched(znode, this.likePattern) {
			if this.verbose {
				this.Ui.Output(fmt.Sprintf("%s %#v", strings.Repeat(" ", 3), stat))
				this.Ui.Output(fmt.Sprintf("%s %v", strings.Repeat(" ", 3), data))
			}
			this.Ui.Output(fmt.Sprintf("%s %s", strings.Repeat(" ", 3), string(data)))
		}

		this.showChildrenRecursively(conn, znode)
	}
}
func (c *Cluster) debug(format string, v ...interface{}) {
	if c.logLevel <= LogLevelDebug {
		pc, file, line, ok := runtime.Caller(1)
		if !ok {
			file = "<?>"
			line = 0
		} else {
			if i := strings.LastIndex(file, "/"); i >= 0 {
				file = file[i+1:]
			}
		}

		fn := runtime.FuncForPC(pc).Name()
		fnparts := strings.Split(fn, "/")

		t := time.Now()
		hour, min, sec := t.Clock()
		nanosec := t.Nanosecond() / 1e3

		debugLock.Lock()
		nodePrefix := c.self.ID.String()
		switch c.color {
		case "red":
			nodePrefix = color.Red(c.self.ID.String())
		case "blue":
			nodePrefix = color.Blue(c.self.ID.String())
		case "yellow":
			nodePrefix = color.Yellow(c.self.ID.String())
		case "green":
			nodePrefix = color.Green(c.self.ID.String())
		}
		fmt.Printf(nodePrefix+" [%d:%d:%d.%04d] %s:%d(%s): %s\n",
			hour, min, sec, nanosec,
			file, line,
			color.Red(fnparts[len(fnparts)-1]),
			fmt.Sprintf(format, v...))
		debugLock.Unlock()
	}
}
func (this *Clusters) printClusters(zkzone *zk.ZkZone, clusterPattern string, port string) {
	if this.registeredBrokers {
		this.printRegisteredBrokers(zkzone)
		return
	}

	type clusterInfo struct {
		name, path         string
		nickname           string
		topicN, partitionN int
		err                string
		priority           int
		public             bool
		retention          int
		replicas           int
		brokerInfos        []zk.BrokerInfo
	}
	clusters := make([]clusterInfo, 0)
	zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		if !patternMatched(zkcluster.Name(), clusterPattern) {
			return
		}

		ci := clusterInfo{
			name: zkcluster.Name(),
			path: zkcluster.Chroot(),
		}
		if this.neat {
			clusters = append(clusters, ci)
			return
		}

		// verbose mode, will calculate topic and partition counts
		brokerList := zkcluster.BrokerList()
		if len(brokerList) == 0 {
			ci.err = "no live brokers"
			clusters = append(clusters, ci)
			return
		}

		if port != "" {
			for _, hostport := range brokerList {
				_, p, err := net.SplitHostPort(hostport)
				swallow(err)

				if p != port {
					return
				}
			}
		}

		info := zkcluster.RegisteredInfo()
		if this.publicOnly && !info.Public {
			return
		}

		if !this.verbose {
			ci.brokerInfos = info.Roster
			clusters = append(clusters, ci)
			return
		}

		kfk, err := sarama.NewClient(brokerList, saramaConfig())
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}
		topics, err := kfk.Topics()
		if err != nil {
			ci.err = err.Error()
			clusters = append(clusters, ci)
			return
		}
		partitionN := 0
		for _, topic := range topics {
			partitions, err := kfk.Partitions(topic)
			if err != nil {
				ci.err = err.Error()
				clusters = append(clusters, ci)
				continue
			}

			partitionN += len(partitions)
		}

		clusters = append(clusters, clusterInfo{
			name:        zkcluster.Name(),
			nickname:    info.Nickname,
			path:        zkcluster.Chroot(),
			topicN:      len(topics),
			partitionN:  partitionN,
			retention:   info.Retention,
			public:      info.Public,
			replicas:    info.Replicas,
			priority:    info.Priority,
			brokerInfos: info.Roster,
		})
	})

	this.Ui.Output(fmt.Sprintf("%s: %d", zkzone.Name(), len(clusters)))
	if this.verbose {
		// two loops: 1. print the clusters with errors 2. print the good clusters
		for _, c := range clusters {
			if c.err == "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s %s", c.name, c.path, color.Red(c.err)))
		}

		// loop 2
		for _, c := range clusters {
			if c.err != "" {
				continue
			}

			this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))

			brokers := []string{}
			for _, broker := range c.brokerInfos {
				if this.ipInNumber {
					brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
				} else {
					brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
				}
			}
			if len(brokers) > 0 {
				sort.Strings(brokers)
				this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
			}

			this.Ui.Output(strings.Repeat(" ", 4) +
				color.Green("nick:%s public:%v topics:%d partitions:%d replicas:%d retention:%dh",
					c.nickname, c.public,
					c.topicN, c.partitionN, c.replicas, c.retention))
		}

		return
	}

	// not verbose mode
	hostsWithoutDnsRecords := make([]string, 0)
	for _, c := range clusters {
		this.Ui.Output(fmt.Sprintf("%30s: %s", c.name, c.path))

		brokers := []string{}
		for _, broker := range c.brokerInfos {
			if this.ipInNumber {
				brokers = append(brokers, fmt.Sprintf("%d/%s:%d", broker.Id, broker.Host, broker.Port))
			} else {
				brokers = append(brokers, fmt.Sprintf("%d/%s", broker.Id, broker.NamedAddr()))
			}

			if broker.Addr() == broker.NamedAddr() {
				hostsWithoutDnsRecords = append(hostsWithoutDnsRecords, fmt.Sprintf("%s:%s", c.name, broker.Addr()))
			}
		}

		if len(brokers) > 0 {
			sort.Strings(brokers)
			this.Ui.Info(color.Green("%31s %s", " ", strings.Join(brokers, ", ")))
		} else {
			this.Ui.Warn(fmt.Sprintf("%31s no live registered brokers", " "))
		}
	}

	if len(hostsWithoutDnsRecords) > 0 {
		this.Ui.Warn("brokers without dns record:")
		for _, broker := range hostsWithoutDnsRecords {
			parts := strings.SplitN(broker, ":", 2)
			this.Ui.Output(fmt.Sprintf("%30s: %s", parts[0], color.Yellow(parts[1])))
		}
	}
}
func (this *Lags) printConsumersLag(zkcluster *zk.ZkCluster) {
	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		lines := make([]string, 0, 100)

		sortedTopicAndPartitionIds := make([]string, 0)
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}

			var (
				lagOutput string
				symbol    string
			)
			if consumer.Lag > int64(this.lagThreshold) {
				lagOutput = color.Red("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Yellow("⚠︎︎")
				} else {
					symbol = color.Yellow("◎")
				}
			} else {
				lagOutput = color.Blue("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Green("◉")
				} else {
					symbol = color.Yellow("◎")
				}
			}

			if consumer.Online {
				if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
					continue
				}

				var (
					host   string
					uptime string
				)
				if consumer.ConsumerZnode == nil {
					host = "unrecognized"
					uptime = "-"
				} else {
					host = color.Green("%s", consumer.ConsumerZnode.Host())
					if time.Since(consumer.ConsumerZnode.Uptime()) < time.Hour {
						uptime = color.Magenta(gofmt.PrettySince(consumer.ConsumerZnode.Uptime()))
					} else {
						uptime = gofmt.PrettySince(consumer.ConsumerZnode.Uptime())
					}
				}

				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-15s %s %-10s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time()),
					host, uptime))
			} else if !this.onlineOnly {
				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-12s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time())))
			}
		}

		if len(lines) > 0 {
			this.Ui.Output(strings.Repeat(" ", 4) + group)
			for _, l := range lines {
				this.Ui.Output(l)
			}
		}
	}
}
func (this *Get) Run(args []string) (exitCode int) {
	cmdFlags := flag.NewFlagSet("get", flag.ContinueOnError)
	cmdFlags.Usage = func() { this.Ui.Output(this.Help()) }
	cmdFlags.StringVar(&this.zone, "z", ctx.ZkDefaultZone(), "")
	cmdFlags.BoolVar(&this.verbose, "l", false, "")
	cmdFlags.BoolVar(&this.recursive, "R", false, "")
	cmdFlags.StringVar(&this.likePattern, "like", "", "")
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	if this.zone == "" {
		this.Ui.Error("unknown zone")
		return 2
	}

	if len(args) == 0 {
		this.Ui.Error("missing path")
		return 2
	}

	this.path = args[len(args)-1]

	zkzone := gzk.NewZkZone(gzk.DefaultConfig(this.zone, ctx.ZoneZkAddrs(this.zone)))
	defer zkzone.Close()
	if this.recursive {
		data, stat, err := zkzone.Conn().Get(this.path)
		must(err)
		if stat.EphemeralOwner > 0 {
			this.Ui.Output(color.Yellow(this.path))
		} else {
			this.Ui.Output(color.Green(this.path))
		}

		if len(data) > 0 {
			if this.verbose {
				this.Ui.Output(fmt.Sprintf("%s %#v", strings.Repeat(" ", 3), stat))
				this.Ui.Output(fmt.Sprintf("%s %v", strings.Repeat(" ", 3), data))
			}
			this.Ui.Output(fmt.Sprintf("%s %s", strings.Repeat(" ", 3), string(data)))
		}

		this.showChildrenRecursively(zkzone.Conn(), this.path)
		return 0
	}

	conn := zkzone.Conn()
	data, stat, err := conn.Get(this.path)
	must(err)
	if len(data) == 0 {
		this.Ui.Output("empty znode")
		return
	}

	if this.verbose {
		this.Ui.Output(color.Magenta("%#v", *stat))
	}

	this.Ui.Output(color.Green("Data Bytes"))
	fmt.Println(data)
	this.Ui.Output(color.Green("Data as String"))
	this.Ui.Output(string(data))

	return
}
func sub(id int) {
	cf := api.DefaultConfig("app2", "mysecret")
	cf.Debug = true
	cf.Sub.Endpoint = addr
	c := api.NewClient(cf)
	i := 0
	t0 := time.Now()
	var err error
	opt := api.SubOption{
		AppId: appid,
		Topic: topic,
		Ver:   "v1",
		Group: group,
		Tag:   tag,
	}
	if mode == "subx" {
		err = c.SubX(opt, func(statusCode int, msg []byte, r *api.SubXResult) error {
			i++
			if n > 0 && i >= n {
				return api.ErrSubStop
			}

			if i%step == 0 {
				log.Println(statusCode, string(msg))
			}
			if sleep > 0 {
				time.Sleep(sleep)
			}

			// handle the msg here
			if rand.Int()%2 == 0 {
				// simulate handling this msg successfully
				log.Println(color.Green("ok"))
			} else {
				// this msg was not handled successfully
				if rand.Int()%2 == 0 {
					// after retrying several times, give up
					r.Bury = api.ShadowRetry
					log.Println(color.Red("shadow"))
				} else {
					// simulate handling the msg successfully after a retry
					if sleep > 0 {
						time.Sleep(sleep)
					}
					log.Println(color.Yellow("retried"))
				}
			}

			log.Println()

			return nil
		})
	} else {
		err = c.Sub(opt, func(statusCode int, msg []byte) error {
			i++
			if n > 0 && i >= n {
				return api.ErrSubStop
			}

			if i%step == 0 {
				log.Println(id, statusCode, string(msg))
			}
			if sleep > 0 {
				time.Sleep(sleep)
			}

			return nil
		})
	}

	if err != nil {
		log.Println(err)
	}

	elapsed := time.Since(t0)
	log.Printf("%d msgs in %s, tps: %.2f\n", n, elapsed, float64(n)/elapsed.Seconds())
}
func (this *Topics) displayTopicsOfCluster(zkcluster *zk.ZkCluster) {
	echoBuffer := func(lines []string) {
		for _, l := range lines {
			this.Ui.Output(l)
		}
	}
	linesInTopicMode := make([]string, 0)
	if this.verbose {
		linesInTopicMode = this.echoOrBuffer(zkcluster.Name(), linesInTopicMode)
	}

	// get all alive brokers within this cluster
	brokers := zkcluster.Brokers()
	if len(brokers) == 0 {
		linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s", " ",
			color.Red("%s empty brokers", zkcluster.Name())), linesInTopicMode)
		echoBuffer(linesInTopicMode)
		return
	}

	if this.verbose {
		sortedBrokerIds := make([]string, 0, len(brokers))
		for brokerId := range brokers {
			sortedBrokerIds = append(sortedBrokerIds, brokerId)
		}
		sort.Strings(sortedBrokerIds)
		for _, brokerId := range sortedBrokerIds {
			if this.ipInNumber {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId]), linesInTopicMode)
			} else {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId].NamedString()), linesInTopicMode)
			}
		}
	}

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(color.Yellow("%5s%+v %s", " ",
				zkcluster.BrokerList(), err.Error()), linesInTopicMode)
		}

		return
	}
	defer kfk.Close()

	topics, err := kfk.Topics()
	swallow(err)
	if len(topics) == 0 {
		if this.topicPattern == "" && this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%5s%s", " ",
				color.Magenta("no topics")), linesInTopicMode)
			echoBuffer(linesInTopicMode)
		}

		return
	}

	sortedTopics := make([]string, 0, len(topics))
	for _, t := range topics {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	topicsCtime := zkcluster.TopicsCtime()
	hasTopicMatched := false
	for _, topic := range sortedTopics {
		if !patternMatched(topic, this.topicPattern) {
			continue
		}
		if this.since > 0 && time.Since(topicsCtime[topic]) > this.since {
			continue
		}

		this.topicN++

		hasTopicMatched = true
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(strings.Repeat(" ", 4)+color.Cyan(topic), linesInTopicMode)
		}

		// get partitions and check if some of them are dead
		alivePartitions, err := kfk.WritablePartitions(topic)
		swallow(err)
		partitions, err := kfk.Partitions(topic)
		swallow(err)
		if len(alivePartitions) != len(partitions) {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %s P: %s/%+v",
				zkcluster.Name(), color.Cyan("%-50s", topic),
				color.Red("partial dead"), color.Green("%+v", alivePartitions), partitions), linesInTopicMode)
		}

		replicas, err := kfk.Replicas(topic, partitions[0])
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitions[0], err))
		}

		this.partitionN += len(partitions)
		if !this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %3dP %dR %s",
				zkcluster.Name(),
				color.Cyan("%-50s", topic),
				len(partitions), len(replicas),
				gofmt.PrettySince(topicsCtime[topic])), linesInTopicMode)
			continue
		}

		for _, partitionID := range alivePartitions {
			leader, err := kfk.Leader(topic, partitionID)
			swallow(err)

			replicas, err := kfk.Replicas(topic, partitionID)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitionID, err))
			}

			isr, isrMtime, partitionCtime := zkcluster.Isr(topic, partitionID)
			isrMtimeSince := gofmt.PrettySince(isrMtime)
			if time.Since(isrMtime).Hours() < 24 {
				// ISR changed within the last 24h, i.e. was out of sync recently
				isrMtimeSince = color.Magenta(isrMtimeSince)
			}

			underReplicated := false
			if len(isr) != len(replicas) {
				underReplicated = true
			}

			latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
			swallow(err)

			oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
			swallow(err)

			if this.count > 0 && (latestOffset-oldestOffset) < this.count {
				continue
			}

			this.totalMsgs += latestOffset - oldestOffset
			this.totalOffsets += latestOffset
			if !underReplicated {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%+v Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, isr,
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			} else {
				// use red for alert
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%s Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, color.Red("%+v", isr),
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			}
		}
	}

	if this.topicPattern != "" {
		if hasTopicMatched {
			echoBuffer(linesInTopicMode)
		}
	} else {
		echoBuffer(linesInTopicMode)
	}
}