Code example #1
File: apperr.go Project: funkygao/gafka
func (this *WatchAppError) consumePartition(zkcluster *zk.ZkCluster, consumer sarama.Consumer,
	topic string, partitionId int32, offset int64, msgCh chan<- *sarama.ConsumerMessage, wg *sync.WaitGroup) {
	defer wg.Done()

	p, err := consumer.ConsumePartition(topic, partitionId, offset)
	if err != nil {
		log.Error("%s %s/%d: offset=%d %v", zkcluster.Name(), topic, partitionId, offset, err)
		return
	}
	defer p.Close()

	for {
		select {
		case <-this.Stop:
			return

		case err := <-p.Errors():
			log.Critical("cluster[%s] %s/%d: %s", zkcluster.Name(), topic, partitionId, err)
			return

		case msg := <-p.Messages():
			if msg != nil && this.predicate(msg.Value) {
				msgCh <- msg
			}

		}
	}
}
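
For context, a caller would typically fan consumePartition out across every partition of the topic and wait on the shared WaitGroup. A minimal sketch under that assumption — the surrounding setup (function name, starting offset, channel size) is illustrative, not taken from the source:

// Hypothetical caller: one goroutine per partition, all feeding msgCh.
func (this *WatchAppError) consumeTopic(zkcluster *zk.ZkCluster, consumer sarama.Consumer,
	topic string, msgCh chan<- *sarama.ConsumerMessage) {
	partitions, err := consumer.Partitions(topic)
	if err != nil {
		log.Error("%s %s: %v", zkcluster.Name(), topic, err)
		return
	}

	var wg sync.WaitGroup
	for _, p := range partitions {
		wg.Add(1)
		go this.consumePartition(zkcluster, consumer, topic, p, sarama.OffsetNewest, msgCh, &wg)
	}
	wg.Wait()
}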
Code example #2
File: clusters.go Project: funkygao/gafka
func (this *Clusters) clusterSummary(zkcluster *zk.ZkCluster) (brokers, topics, partitions int, flat, cum int64) {
	brokerInfos := zkcluster.Brokers()
	brokers = len(brokerInfos)

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return
	}
	defer kfk.Close()

	topicInfos, _ := kfk.Topics()
	topics = len(topicInfos)
	for _, t := range topicInfos {
		alivePartitions, _ := kfk.WritablePartitions(t)
		partitions += len(alivePartitions)

		for _, partitionID := range alivePartitions {
			latestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetNewest)
			oldestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetOldest)
			flat += (latestOffset - oldestOffset) // messages still retained in this partition
			cum += latestOffset                   // messages ever produced to this partition
		}

	}

	return
}
Code example #3
File: peek.go Project: funkygao/gafka
func (this *Peek) consumeCluster(zkcluster *zk.ZkCluster, topicPattern string,
	partitionId int, msgChan chan *sarama.ConsumerMessage) {
	brokerList := zkcluster.BrokerList()
	if len(brokerList) == 0 {
		return
	}
	kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}
	//defer kfk.Close() // FIXME how to close it

	topics, err := kfk.Topics()
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}

	for _, t := range topics {
		if patternMatched(t, topicPattern) {
			go this.simpleConsumeTopic(zkcluster, kfk, t, int32(partitionId), msgChan)
		}
	}

}
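
simpleConsumeTopic itself is not among these excerpts. A hedged sketch of the shape it plausibly has, reusing the consumePartition from code example #6; the "negative partitionId means all partitions" convention is an assumption here:

// Hypothetical shape of simpleConsumeTopic (not shown in these excerpts).
func (this *Peek) simpleConsumeTopicSketch(zkcluster *zk.ZkCluster, kfk sarama.Client,
	topic string, partitionId int32, msgCh chan *sarama.ConsumerMessage) {
	consumer, err := sarama.NewConsumerFromClient(kfk)
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}
	// not closed here: the partition consumers outlive this call,
	// the same lifecycle question as the FIXME above

	if partitionId >= 0 {
		go this.consumePartition(zkcluster, kfk, consumer, topic, partitionId, msgCh, sarama.OffsetNewest)
		return
	}

	// assumed convention: negative partitionId means peek every partition
	partitions, err := kfk.Partitions(topic)
	if err != nil {
		this.Ui.Output(err.Error())
		return
	}
	for _, p := range partitions {
		go this.consumePartition(zkcluster, kfk, consumer, topic, p, msgCh, sarama.OffsetNewest)
	}
}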
Code example #4
File: factory.go Project: funkygao/gafka
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	cf := sarama.NewConfig()
	cf.Metadata.RefreshFrequency = time.Minute * 10
	cf.Metadata.Retry.Max = 3
	cf.Metadata.Retry.Backoff = time.Second * 3

	cf.ChannelBufferSize = 1000

	cf.Producer.Return.Errors = true
	cf.Producer.Flush.Messages = 2000         // 2000 messages per batch
	cf.Producer.Flush.Frequency = time.Second // flush interval
	cf.Producer.Flush.MaxMessages = 0         // unlimited
	cf.Producer.RequiredAcks = sarama.WaitForLocal
	cf.Producer.Retry.Backoff = time.Second * 4
	cf.Producer.Retry.Max = 3
	cf.Net.DialTimeout = time.Second * 30
	cf.Net.WriteTimeout = time.Second * 30
	cf.Net.ReadTimeout = time.Second * 30

	switch this.Compress {
	case "gzip":
		cf.Producer.Compression = sarama.CompressionGZIP

	case "snappy":
		cf.Producer.Compression = sarama.CompressionSnappy
	}
	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
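
A sketch of driving the returned AsyncProducer. The function name, topic, and payload are illustrative; the one firm point is that with Return.Errors enabled above, the Errors channel must be drained or the producer will eventually block:

// Hypothetical driver: send asynchronously, drain errors concurrently.
func (this *Mirror) demoPub(c2 *zk.ZkCluster) {
	pub, err := this.makePub(c2)
	if err != nil {
		panic(err)
	}

	go func() {
		for err := range pub.Errors() {
			log.Error("%v", err) // delivery failed after all retries
		}
	}()

	pub.Input() <- &sarama.ProducerMessage{
		Topic: "demo", // illustrative topic name
		Value: sarama.ByteEncoder([]byte("hello")),
	}
	pub.Close() // flushes pending messages, then closes Errors()
}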
Code example #5
File: topics.go Project: funkygao/gafka
func (this *Topics) resetTopicConfig(zkcluster *zk.ZkCluster, topic string) {
	zkAddrs := zkcluster.ZkConnectAddr()
	key := "retention.ms"
	cmd := pipestream.New(fmt.Sprintf("%s/bin/kafka-topics.sh", ctx.KafkaHome()),
		fmt.Sprintf("--zookeeper %s", zkAddrs),
		fmt.Sprintf("--alter"),
		fmt.Sprintf("--topic %s", topic),
		fmt.Sprintf("--deleteConfig %s", key),
	)
	err := cmd.Open()
	swallow(err)
	defer cmd.Close()

	scanner := bufio.NewScanner(cmd.Reader())
	scanner.Split(bufio.ScanLines)

	output := make([]string, 0)
	for scanner.Scan() {
		output = append(output, scanner.Text())
	}
	swallow(scanner.Err())

	path := zkcluster.GetTopicConfigPath(topic)
	this.Ui.Info(path)

	for _, line := range output {
		this.Ui.Output(line)
	}
}
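
The swallow helper used here (and in several excerpts below) is not part of this listing; its assumed behavior is simply panic-on-error:

// Assumed definition of swallow: abort on any non-nil error.
func swallow(err error) {
	if err != nil {
		panic(err)
	}
}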
Code example #6
File: peek.go Project: funkygao/gafka
func (this *Peek) consumePartition(zkcluster *zk.ZkCluster, kfk sarama.Client, consumer sarama.Consumer,
	topic string, partitionId int32, msgCh chan *sarama.ConsumerMessage, offset int64) {
	p, err := consumer.ConsumePartition(topic, partitionId, offset)
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%s %s/%d: offset=%d %v", zkcluster.Name(), topic, partitionId, offset, err))
		return
	}
	defer p.Close()

	n := int64(0)
	for {
		select {
		case <-this.quit:
			return

		case msg := <-p.Messages():
			msgCh <- msg

			n++
			if this.lastN > 0 && n >= this.lastN {
				return
			}
		}
	}
}
Code example #7
File: top.go Project: funkygao/gafka
func (this *Top) clusterTopConsumers(zkcluster *zk.ZkCluster) {
	var topic string
	for {
		total := zkcluster.TotalConsumerOffsets(this.topicPattern)
		if this.topicPattern != "" {
			topic = this.topicPattern
		} else {
			topic = "-all-"
		}

		key := zkcluster.Name() + ":" + topic

		this.mu.Lock()
		this.consumerCounters[key] = float64(total)
		this.mu.Unlock()

		if !this.dashboardGraph {
			this.mu.Lock()
			this.counters[key] = float64(total)
			this.mu.Unlock()
		}

		time.Sleep(this.topInterval)
	}

}
Code example #8
File: topics.go Project: funkygao/gafka
func (this *Topics) addTopic(zkcluster *zk.ZkCluster, topic string, replicas,
	partitions int) error {
	this.Ui.Info(fmt.Sprintf("creating kafka topic: %s", topic))

	ts := sla.DefaultSla()
	ts.Partitions = partitions
	ts.Replicas = replicas
	lines, err := zkcluster.AddTopic(topic, ts)
	if err != nil {
		return err
	}

	for _, l := range lines {
		this.Ui.Output(color.Yellow(l))
	}
	if this.ipInNumber {
		this.Ui.Output(fmt.Sprintf("\tzookeeper.connect: %s", zkcluster.ZkConnectAddr()))
		this.Ui.Output(fmt.Sprintf("\t      broker.list: %s",
			strings.Join(zkcluster.BrokerList(), ",")))
	} else {
		this.Ui.Output(fmt.Sprintf("\tzookeeper.connect: %s", zkcluster.NamedZkConnectAddr()))
		this.Ui.Output(fmt.Sprintf("\t      broker.list: %s",
			strings.Join(zkcluster.NamedBrokerList(), ",")))
	}

	return nil
}
Code example #9
File: mirror.go Project: chendx79/gafka
func (this *Mirror) makePub(c2 *zk.ZkCluster) (sarama.AsyncProducer, error) {
	// TODO setup batch size
	cf := sarama.NewConfig()
	switch this.compress {
	case "gzip":
		cf.Producer.Compression = sarama.CompressionGZIP

	case "snappy":
		cf.Producer.Compression = sarama.CompressionSnappy
	}
	return sarama.NewAsyncProducer(c2.BrokerList(), cf)
}
Code example #10
File: mirror.go Project: chendx79/gafka
func (this *Mirror) makeSub(c1 *zk.ZkCluster, group string, topics []string) (*consumergroup.ConsumerGroup, error) {
	cf := consumergroup.NewConfig()
	cf.Zookeeper.Chroot = c1.Chroot()
	cf.Offsets.CommitInterval = time.Second * 10
	cf.Offsets.ProcessingTimeout = time.Second
	cf.ChannelBufferSize = 0
	cf.Consumer.Return.Errors = true
	cf.Consumer.MaxProcessingTime = 100 * time.Millisecond // chan recv timeout

	sub, err := consumergroup.JoinConsumerGroup(group, topics, c1.ZkZone().ZkAddrList(), cf)
	return sub, err
}
Code example #11
File: factory.go Project: funkygao/gafka
func (this *Mirror) makeSub(c1 *zk.ZkCluster, group string, topics []string) (*consumergroup.ConsumerGroup, error) {
	cf := consumergroup.NewConfig()
	cf.Zookeeper.Chroot = c1.Chroot()
	cf.Offsets.CommitInterval = time.Second * 10
	cf.Offsets.ProcessingTimeout = time.Second
	cf.Consumer.Offsets.Initial = sarama.OffsetOldest
	cf.ChannelBufferSize = 256
	cf.Consumer.Return.Errors = true
	cf.OneToOne = false

	sub, err := consumergroup.JoinConsumerGroup(group, topics, c1.ZkZone().ZkAddrList(), cf)
	return sub, err
}
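
A hedged consume-loop sketch for the group returned by makeSub. Note that committing right after handing the message to an async producer, as done below, is at-most-once (the commit precedes the delivery ack), so treat this as illustrative only:

// Hypothetical consume loop: mirror each message, then record progress.
func (this *Mirror) drainSketch(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer) {
	for {
		select {
		case msg := <-sub.Messages():
			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			sub.CommitUpto(msg) // at-most-once: commits before delivery is acked

		case err := <-sub.Errors(): // Return.Errors is enabled above
			log.Error("%v", err)
		}
	}
}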
Code example #12
File: topics.go Project: funkygao/gafka
func (this *Topics) delTopic(zkcluster *zk.ZkCluster, topic string) error {
	this.Ui.Info(fmt.Sprintf("deleting kafka topic: %s", topic))

	lines, err := zkcluster.DeleteTopic(topic)
	if err != nil {
		return err
	}

	for _, l := range lines {
		this.Ui.Output(color.Yellow(l))
	}

	return nil
}
Code example #13
File: topbroker.go Project: chendx79/gafka
func (this *TopBroker) clusterTopProducers(zkcluster *zk.ZkCluster) {
	kfk, err := sarama.NewClient(zkcluster.BrokerList(), sarama.NewConfig())
	if err != nil {
		return
	}
	defer kfk.Close()

	for {
		hostOffsets := make(map[string]int64)

		topics, err := kfk.Topics()
		swallow(err)

		<-this.signalsCh[zkcluster.Name()] // block until this cluster is signaled to collect the next round

		for _, topic := range topics {
			if !patternMatched(topic, this.topic) {
				continue
			}

			partions, err := kfk.WritablePartitions(topic)
			swallow(err)
			for _, partitionID := range partions {
				leader, err := kfk.Leader(topic, partitionID)
				swallow(err)

				latestOffset, err := kfk.GetOffset(topic, partitionID,
					sarama.OffsetNewest)
				swallow(err)

				host, _, err := net.SplitHostPort(leader.Addr())
				swallow(err)
				if this.shortIp {
					host = shortIp(host)
				}

				if _, present := hostOffsets[host]; !present {
					hostOffsets[host] = 0
				}
				hostOffsets[host] += latestOffset
			}
		}

		this.hostOffsetCh <- hostOffsets

		kfk.RefreshMetadata(topics...)
	}
}
Code example #14
File: lszk.go Project: funkygao/gafka
func (this *LsZk) printCluster(zkcluster *zk.ZkCluster) {
	this.Ui.Output(color.Green(zkcluster.Name()))
	children, err := zkcluster.ListChildren(this.recursive)
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%s%s", strings.Repeat(" ", 4), err))
		return
	}

	for _, c := range children {
		this.Ui.Output(fmt.Sprintf("%s%s", strings.Repeat(" ", 4), c))
		if strings.HasSuffix(c, "brokers") {
			this.Ui.Output(fmt.Sprintf("%s%s/ids", strings.Repeat(" ", 4), c))
			this.Ui.Output(fmt.Sprintf("%s%s/topics", strings.Repeat(" ", 4), c))
		}
	}
}
Code example #15
File: mirror.go Project: chendx79/gafka
func (this *Mirror) makeMirror(c1, c2 *zk.ZkCluster) {
	pub, err := this.makePub(c2)
	swallow(err)

	topics, topicsChanges, err := c1.WatchTopics()
	swallow(err)

	log.Printf("topics: %+v", topics)
	if len(topics) == 0 {
		log.Println("empty topics")
		return
	}

	group := fmt.Sprintf("%s.%s._mirror_", c1.Name(), c2.Name())
	sub, err := this.makeSub(c1, group, topics)
	swallow(err)

	pumpStopper := make(chan struct{})
	go this.pump(sub, pub, pumpStopper)

LOOP:
	for {
		select {
		case <-topicsChanges:
			log.Println("topics changed, stopping pump...")
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopper             // await pump cleanup

			// refresh c1 topics
			topics, err = c1.Topics()
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			log.Printf("topics: %+v", topics)

			sub, err = this.makeSub(c1, group, topics)
			if err != nil {
				// TODO how to handle this err?
				log.Println(err)
			}

			go this.pump(sub, pub, pumpStopper)

		case <-this.quit:
			log.Println("awaiting pump cleanup...")
			<-pumpStopper
			log.Printf("total transferred: %s %smsgs",
				gofmt.ByteSize(this.transferBytes),
				gofmt.Comma(this.transferN))
			break LOOP
		}
	}

	pub.Close()
}
Code example #16
File: topics.go Project: funkygao/gafka
func (this *Topics) configTopic(zkcluster *zk.ZkCluster, topic string, retentionInMinute int) {
	/*
	  val SegmentBytesProp = "segment.bytes"
	  val SegmentMsProp = "segment.ms"
	  val SegmentIndexBytesProp = "segment.index.bytes"
	  val FlushMessagesProp = "flush.messages"
	  val FlushMsProp = "flush.ms"
	  val RetentionBytesProp = "retention.bytes"
	  val RetentionMsProp = "retention.ms"
	  val MaxMessageBytesProp = "max.message.bytes"
	  val IndexIntervalBytesProp = "index.interval.bytes"
	  val DeleteRetentionMsProp = "delete.retention.ms"
	  val FileDeleteDelayMsProp = "file.delete.delay.ms"
	  val MinCleanableDirtyRatioProp = "min.cleanable.dirty.ratio"
	  val CleanupPolicyProp = "cleanup.policy"
	*/

	// ./bin/kafka-topics.sh --zookeeper localhost:2181/kafka --alter --topic foobar --config max.message.bytes=10000101
	// zk: {"version":1,"config":{"index.interval.bytes":"10000101","max.message.bytes":"10000101"}}

	if retentionInMinute < 10 {
		panic("less than 10 minutes?")
	}

	ts := sla.DefaultSla()
	ts.RetentionHours = float64(retentionInMinute) / 60
	output, err := zkcluster.AlterTopic(topic, ts)
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%+v: %v", ts, err))
		os.Exit(1)
	}

	path := zkcluster.GetTopicConfigPath(topic)
	this.Ui.Info(path)

	for _, line := range output {
		this.Ui.Output(line)
	}

}
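
The sla.DefaultSla value configured here is defined elsewhere in gafka. Inferred only from the fields set in this excerpt and in code example #8, its shape is roughly:

// Assumed (partial) shape of the SLA type returned by sla.DefaultSla,
// inferred from usage in these excerpts only; field names beyond these
// three are unknown.
type TopicSla struct {
	Partitions     int
	Replicas       int
	RetentionHours float64
}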
Code example #17
File: topics.go Project: funkygao/gafka
func (this *Topics) clusterSummary(zkcluster *zk.ZkCluster) []topicSummary {
	r := make([]topicSummary, 0, 10)

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return nil
	}
	defer kfk.Close()

	topicInfos, _ := kfk.Topics()
	for _, t := range topicInfos {
		flat := int64(0)
		cum := int64(0)
		alivePartitions, _ := kfk.WritablePartitions(t)
		for _, partitionID := range alivePartitions {
			latestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetNewest)
			oldestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetOldest)
			flat += (latestOffset - oldestOffset) // messages still retained
			cum += latestOffset                   // messages ever produced
		}

		r = append(r, topicSummary{zkcluster.ZkZone().Name(), zkcluster.Name(), t, len(alivePartitions), flat, cum})
	}

	return r
}
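
topicSummary itself is not shown; inferred from the positional literal in the append above, it is roughly:

// Assumed shape of topicSummary, matching the positional fields used
// above (field names are illustrative).
type topicSummary struct {
	zone, cluster, topic string
	partitions           int
	flat, cum            int64
}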
Code example #18
File: consumers.go Project: funkygao/gafka
func (this *Consumers) displayGroupOffsets(zkcluster *zk.ZkCluster, group, topic string, echo bool) []consumerGroupOffset {
	offsetMap := zkcluster.ConsumerOffsetsOfGroup(group)
	sortedTopics := make([]string, 0, len(offsetMap))
	for t, _ := range offsetMap {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	r := make([]consumerGroupOffset, 0)

	for _, t := range sortedTopics {
		if !patternMatched(t, this.topicPattern) || (topic != "" && t != topic) {
			continue
		}

		sortedPartitionIds := make([]string, 0, len(offsetMap[t]))
		for partitionId, _ := range offsetMap[t] {
			sortedPartitionIds = append(sortedPartitionIds, partitionId)
		}
		sort.Strings(sortedPartitionIds)

		for _, partitionId := range sortedPartitionIds {
			r = append(r, consumerGroupOffset{
				topic:       t,
				partitionId: partitionId,
				offset:      gofmt.Comma(offsetMap[t][partitionId]),
			})

			if echo {
				this.Ui.Output(fmt.Sprintf("\t\t%s/%s Offset:%s",
					t, partitionId, gofmt.Comma(offsetMap[t][partitionId])))
			}

		}
	}

	return r

}
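
Likewise, consumerGroupOffset can be inferred from the struct literal above; note the offset is stored pre-formatted:

// Assumed shape of consumerGroupOffset (names taken from the field
// keys above).
type consumerGroupOffset struct {
	topic       string
	partitionId string
	offset      string // comma-formatted by gofmt.Comma, e.g. "1,234,567"
}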
Code example #19
File: topics.go Project: funkygao/gafka
func (this *Topics) displayTopicsOfCluster(zkcluster *zk.ZkCluster) {
	echoBuffer := func(lines []string) {
		for _, l := range lines {
			this.Ui.Output(l)
		}
	}

	linesInTopicMode := make([]string, 0)
	if this.verbose {
		linesInTopicMode = this.echoOrBuffer(zkcluster.Name(), linesInTopicMode)
	}

	// get all alive brokers within this cluster
	brokers := zkcluster.Brokers()
	if len(brokers) == 0 {
		linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s", " ",
			color.Red("%s empty brokers", zkcluster.Name())), linesInTopicMode)
		echoBuffer(linesInTopicMode)
		return
	}

	if this.verbose {
		sortedBrokerIds := make([]string, 0, len(brokers))
		for brokerId, _ := range brokers {
			sortedBrokerIds = append(sortedBrokerIds, brokerId)
		}
		sort.Strings(sortedBrokerIds)
		for _, brokerId := range sortedBrokerIds {
			if this.ipInNumber {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId]), linesInTopicMode)
			} else {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%4s%s %s", " ",
					color.Green(brokerId), brokers[brokerId].NamedString()), linesInTopicMode)
			}

		}
	}

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(color.Yellow("%5s%+v %s", " ",
				zkcluster.BrokerList(), err.Error()), linesInTopicMode)
		}

		return
	}
	defer kfk.Close()

	topics, err := kfk.Topics()
	swallow(err)
	if len(topics) == 0 {
		if this.topicPattern == "" && this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%5s%s", " ",
				color.Magenta("no topics")), linesInTopicMode)
			echoBuffer(linesInTopicMode)
		}

		return
	}

	sortedTopics := make([]string, 0, len(topics))
	for _, t := range topics {
		sortedTopics = append(sortedTopics, t)
	}
	sort.Strings(sortedTopics)

	topicsCtime := zkcluster.TopicsCtime()
	hasTopicMatched := false
	for _, topic := range sortedTopics {
		if !patternMatched(topic, this.topicPattern) {
			continue
		}

		if this.since > 0 && time.Since(topicsCtime[topic]) > this.since {
			continue
		}

		this.topicN++

		hasTopicMatched = true
		if this.verbose {
			linesInTopicMode = this.echoOrBuffer(strings.Repeat(" ", 4)+color.Cyan(topic), linesInTopicMode)
		}

		// get partitions and check whether some are dead
		alivePartitions, err := kfk.WritablePartitions(topic)
		swallow(err)
		partions, err := kfk.Partitions(topic)
		swallow(err)
		if len(alivePartitions) != len(partions) {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %s P: %s/%+v",
				zkcluster.Name(), color.Cyan("%-50s", topic), color.Red("partial dead"), color.Green("%+v", alivePartitions), partions), linesInTopicMode)
		}

		replicas, err := kfk.Replicas(topic, partions[0])
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partions[0], err))
		}

		this.partitionN += len(partions)
		if !this.verbose {
			linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%30s %s %3dP %dR %s",
				zkcluster.Name(),
				color.Cyan("%-50s", topic),
				len(partions), len(replicas),
				gofmt.PrettySince(topicsCtime[topic])), linesInTopicMode)
			continue
		}

		for _, partitionID := range alivePartitions {
			leader, err := kfk.Leader(topic, partitionID)
			swallow(err)

			replicas, err := kfk.Replicas(topic, partitionID)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("%s/%d %v", topic, partitionID, err))
			}

			isr, isrMtime, partitionCtime := zkcluster.Isr(topic, partitionID)
			isrMtimeSince := gofmt.PrettySince(isrMtime)
			if time.Since(isrMtime).Hours() < 24 {
				// ISR changed within the last 24h, so highlight it
				isrMtimeSince = color.Magenta(isrMtimeSince)
			}

			underReplicated := false
			if len(isr) != len(replicas) {
				underReplicated = true
			}

			latestOffset, err := kfk.GetOffset(topic, partitionID,
				sarama.OffsetNewest)
			swallow(err)

			oldestOffset, err := kfk.GetOffset(topic, partitionID,
				sarama.OffsetOldest)
			swallow(err)

			if this.count > 0 && (latestOffset-oldestOffset) < this.count {
				continue
			}

			this.totalMsgs += latestOffset - oldestOffset
			this.totalOffsets += latestOffset
			if !underReplicated {
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%+v Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, isr,
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			} else {
				// use red for alert
				linesInTopicMode = this.echoOrBuffer(fmt.Sprintf("%8d Leader:%s Replicas:%+v Isr:%s Offset:%16s - %-16s Num:%-15s %s-%s",
					partitionID,
					color.Green("%d", leader.ID()), replicas, color.Red("%+v", isr),
					gofmt.Comma(oldestOffset), gofmt.Comma(latestOffset), gofmt.Comma(latestOffset-oldestOffset),
					gofmt.PrettySince(partitionCtime), isrMtimeSince), linesInTopicMode)
			}
		}
	}

	if this.topicPattern != "" {
		if hasTopicMatched {
			echoBuffer(linesInTopicMode)
		}

	} else {
		echoBuffer(linesInTopicMode)
	}
}
Code example #20
File: top.go Project: funkygao/gafka
func (this *Top) clusterTopProducers(zkcluster *zk.ZkCluster) {
	cluster := zkcluster.Name()
	brokerList := zkcluster.BrokerList()
	if len(brokerList) == 0 {
		return
	}

	kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
	if err != nil {
		return
	}
	defer kfk.Close()

	for {
		topics, err := kfk.Topics()
		if err != nil || len(topics) == 0 {
			return
		}

		for _, topic := range topics {
			if !patternMatched(topic, this.topicPattern) {
				continue
			}

			msgs := int64(0)
			alivePartitions, err := kfk.WritablePartitions(topic)
			swallow(err)

			for _, partitionID := range alivePartitions {
				latestOffset, err := kfk.GetOffset(topic, partitionID,
					sarama.OffsetNewest)
				if err != nil {
					// this broker is down
					continue
				}

				msgs += latestOffset
			}

			this.mu.Lock()
			if _, present := this.brokers[cluster+":"+topic]; !present {
				// calculate the broker leading partitions
				leadingPartitions := make(map[string]int) // broker:lead partitions n
				for _, pid := range alivePartitions {
					leader, err := kfk.Leader(topic, pid)
					swallow(err)
					leadingPartitions[leader.Addr()]++
				}

				brokers := make([]string, 0)
				for addr, n := range leadingPartitions {
					brokers = append(brokers, fmt.Sprintf("%d@%s", n, addr))
				}

				this.brokers[cluster+":"+topic] = this.discardPortOfBrokerAddr(brokers)
			}
			this.counters[cluster+":"+topic] = float64(msgs)
			this.partitions[cluster+":"+topic] = len(alivePartitions)
			this.mu.Unlock()
		}

		time.Sleep(time.Second)
		kfk.RefreshMetadata(topics...)
	}

}
Code example #21
File: lags.go Project: chendx79/gafka
func (this *Lags) printConsumersLag(zkcluster *zk.ZkCluster) {
	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group, _ := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		lines := make([]string, 0, 100)

		sortedTopicAndPartitionIds := make([]string, 0)
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}

			var (
				lagOutput string
				symbol    string
			)
			if consumer.Lag > int64(this.lagThreshold) {
				lagOutput = color.Red("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Yellow("⚠︎︎")
				} else {
					symbol = color.Yellow("◎")
				}
			} else {
				lagOutput = color.Blue("%15s", gofmt.Comma(consumer.Lag))
				if consumer.Online {
					symbol = color.Green("◉")
				} else {
					symbol = color.Yellow("◎")
				}
			}

			if consumer.Online {
				if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
					continue
				}

				var (
					host   string
					uptime string
				)
				if consumer.ConsumerZnode == nil {
					host = "unrecognized"
					uptime = "-"
				} else {
					host = color.Green("%s", consumer.ConsumerZnode.Host())
					if time.Since(consumer.ConsumerZnode.Uptime()) < time.Hour {
						uptime = color.Magenta(gofmt.PrettySince(consumer.ConsumerZnode.Uptime()))
					} else {
						uptime = gofmt.PrettySince(consumer.ConsumerZnode.Uptime())
					}
				}

				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-15s %s %-10s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time()),
					host, uptime))
			} else if !this.onlineOnly {
				lines = append(lines, fmt.Sprintf("\t%s %35s/%-2s %12s -> %-12s %s %s",
					symbol,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					lagOutput,
					gofmt.PrettySince(consumer.Mtime.Time())))
			}
		}

		if len(lines) > 0 {
			this.Ui.Output(strings.Repeat(" ", 4) + group)
			for _, l := range lines {
				this.Ui.Output(l)
			}
		}
	}
}
Code example #22
File: lags.go Project: chendx79/gafka
func (this *Lags) printConsumersLagTable(zkcluster *zk.ZkCluster) {
	lines := make([]string, 0)
	header := "ConsumerGroup|Topic/Partition|Produced|Consumed|Lag|Committed|Uptime"
	lines = append(lines, header)

	// sort by group name
	consumersByGroup := zkcluster.ConsumersByGroup(this.groupPattern)
	sortedGroups := make([]string, 0, len(consumersByGroup))
	for group, _ := range consumersByGroup {
		sortedGroups = append(sortedGroups, group)
	}
	sort.Strings(sortedGroups)

	for _, group := range sortedGroups {
		if !patternMatched(group, this.groupPattern) {
			continue
		}

		sortedTopicAndPartitionIds := make([]string, 0, len(consumersByGroup[group]))
		consumers := make(map[string]zk.ConsumerMeta)
		for _, t := range consumersByGroup[group] {
			key := fmt.Sprintf("%s:%s", t.Topic, t.PartitionId)
			sortedTopicAndPartitionIds = append(sortedTopicAndPartitionIds, key)

			consumers[key] = t
		}
		sort.Strings(sortedTopicAndPartitionIds)

		for _, topicAndPartitionId := range sortedTopicAndPartitionIds {
			consumer := consumers[topicAndPartitionId]

			if !patternMatched(consumer.Topic, this.topicPattern) {
				continue
			}
			if !consumer.Online {
				continue
			}
			if this.problematicMode && consumer.Lag <= int64(this.lagThreshold) {
				continue
			}
			if consumer.ConsumerZnode == nil {
				this.Ui.Warn(fmt.Sprintf("%+v has no znode", consumer))
				continue
			}

			lines = append(lines,
				fmt.Sprintf("%s|%s/%s|%s|%s|%s|%s|%s",
					group,
					consumer.Topic, consumer.PartitionId,
					gofmt.Comma(consumer.ProducerOffset),
					gofmt.Comma(consumer.ConsumerOffset),
					gofmt.Comma(consumer.Lag),
					gofmt.PrettySince(consumer.Mtime.Time()),
					gofmt.PrettySince(consumer.ConsumerZnode.Uptime())))
		}
	}

	if len(lines) > 1 {
		this.Ui.Info(fmt.Sprintf("%s ▾", zkcluster.Name()))
		this.Ui.Output(columnize.SimpleFormat(lines))
	}
}
Code example #23
File: mirror.go Project: funkygao/gafka
func (this *Mirror) groupName(c1, c2 *zk.ZkCluster) string {
	return fmt.Sprintf("_mirror_.%s.%s.%s.%s", c1.ZkZone().Name(), c1.Name(), c2.ZkZone().Name(), c2.Name())
}
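
For example, mirroring zone z1, cluster trade into zone z2, cluster backup (illustrative names) yields the group name _mirror_.z1.trade.z2.backup. Note this format differs from the %s.%s._mirror_ naming used in code example #15, which derives the group from cluster names alone.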
Code example #24
File: mirror.go Project: funkygao/gafka
func (this *Mirror) runMirror(c1, c2 *zk.ZkCluster, limit int64) {
	this.startedAt = time.Now()

	log.Info("start [%s/%s] -> [%s/%s] with bandwidth %sbps",
		c1.ZkZone().Name(), c1.Name(),
		c2.ZkZone().Name(), c2.Name(),
		gofmt.Comma(limit*8))

	pub, err := this.makePub(c2)
	if err != nil {
		panic(err)
	}
	log.Trace("pub[%s/%s] created", c2.ZkZone().Name(), c2.Name())

	go func(pub sarama.AsyncProducer, c *zk.ZkCluster) {
		for {
			select {
			case <-this.quit:
				return

			case err := <-pub.Errors():
				// messages will only be returned here after all retry attempts are exhausted.
				//
				// e.g.
				// Failed to produce message to topic xx: write tcp src->kfk: i/o timeout
				// kafka: broker not connected
				log.Error("pub[%s/%s] %v", c.ZkZone().Name(), c.Name(), err)
			}
		}
	}(pub, c2)

	group := this.groupName(c1, c2)
	ever := true
	round := 0
	for ever {
		round++

		topics, topicsChanges, err := c1.WatchTopics()
		if err != nil {
			log.Error("#%d [%s/%s]watch topics: %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		topics = this.realTopics(topics)
		sub, err := this.makeSub(c1, group, topics)
		if err != nil {
			log.Error("#%d [%s/%s] %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		log.Info("#%d starting pump [%s/%s] -> [%s/%s] %d topics with group %s for %+v", round,
			c1.ZkZone().Name(), c1.Name(),
			c2.ZkZone().Name(), c2.Name(), len(topics), group, topics)

		pumpStopper := make(chan struct{})
		pumpStopped := make(chan struct{})
		go this.pump(sub, pub, pumpStopper, pumpStopped)

		select {
		case <-topicsChanges:
			// TODO log the diff of the topics
			log.Warn("#%d [%s/%s] topics changed, stopping pump...", round, c1.Name(), c2.Name())
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopped             // await pump cleanup

		case <-this.quit:
			log.Info("#%d awaiting pump cleanup...", round)
			<-pumpStopped

			ever = false

		case <-pumpStopped:
			// pump encounters problems, just retry
		}
	}

	log.Info("total transferred: %s %smsgs",
		gofmt.ByteSize(this.transferBytes),
		gofmt.Comma(this.transferN))

	log.Info("closing pub...")
	pub.Close()
}
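
pump is not included in these excerpts; below is a minimal sketch of the stop/stopped handshake runMirror depends on. The message-handling body is illustrative (see the caveat on code example #11); the contract is that pump exits on stop or on error, and always signals stopped so that runMirror's <-pumpStopped can never block forever:

// Hypothetical pump skeleton honoring the channel contract above.
func (this *Mirror) pumpSketch(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer,
	stop, stopped chan struct{}) {
	defer func() {
		sub.Close()
		close(stopped) // cleanup done; unblocks every waiter
	}()

	for {
		select {
		case <-stop:
			return

		case err := <-sub.Errors():
			log.Error("%v", err)
			return // runMirror will rebuild the sub and retry

		case msg := <-sub.Messages():
			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Value: sarama.ByteEncoder(msg.Value),
			}
			sub.CommitUpto(msg)
		}
	}
}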
Code example #25
File: underreplicated.go Project: funkygao/gafka
func (this *UnderReplicated) displayUnderReplicatedPartitionsOfCluster(zkcluster *zk.ZkCluster) []string {
	brokerList := zkcluster.BrokerList()
	if len(brokerList) == 0 {
		this.Ui.Warn(fmt.Sprintf("%s empty brokers", zkcluster.Name()))
		return nil
	}

	kfk, err := sarama.NewClient(brokerList, saramaConfig())
	if err != nil {
		this.Ui.Error(fmt.Sprintf("%s %+v %s", zkcluster.Name(), brokerList, err.Error()))

		return nil
	}
	defer kfk.Close()

	topics, err := kfk.Topics()
	swallow(err)
	if len(topics) == 0 {
		return nil
	}

	lines := make([]string, 0, 10)
	for _, topic := range topics {
		// get partitions and check whether some are dead
		alivePartitions, err := kfk.WritablePartitions(topic)
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s topic[%s] cannot fetch writable partitions: %v", zkcluster.Name(), topic, err))
			continue
		}
		partions, err := kfk.Partitions(topic)
		if err != nil {
			this.Ui.Error(fmt.Sprintf("%s topic[%s] cannot fetch partitions: %v", zkcluster.Name(), topic, err))
			continue
		}
		if len(alivePartitions) != len(partions) {
			this.Ui.Error(fmt.Sprintf("%s topic[%s] has %s partitions: %+v/%+v", zkcluster.Name(),
				topic, color.Red("dead"), alivePartitions, partions))
		}

		for _, partitionID := range alivePartitions {
			replicas, err := kfk.Replicas(topic, partitionID)
			if err != nil {
				this.Ui.Error(fmt.Sprintf("%s topic[%s] P:%d: %v", zkcluster.Name(), topic, partitionID, err))
				continue
			}

			isr, isrMtime, partitionCtime := zkcluster.Isr(topic, partitionID)

			underReplicated := false
			if len(isr) != len(replicas) {
				underReplicated = true
			}

			if underReplicated {
				leader, err := kfk.Leader(topic, partitionID)
				swallow(err)

				latestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetNewest)
				swallow(err)

				oldestOffset, err := kfk.GetOffset(topic, partitionID, sarama.OffsetOldest)
				swallow(err)

				lines = append(lines, fmt.Sprintf("\t%s Partition:%d/%s Leader:%d Replicas:%+v Isr:%+v/%s Offset:%d-%d Num:%d",
					topic, partitionID,
					gofmt.PrettySince(partitionCtime),
					leader.ID(), replicas, isr,
					gofmt.PrettySince(isrMtime),
					oldestOffset, latestOffset, latestOffset-oldestOffset))
			}
		}
	}

	return lines
}