func (this *Topics) clusterSummary(zkcluster *zk.ZkCluster) []topicSummary {
	r := make([]topicSummary, 0, 10)

	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return nil
	}
	defer kfk.Close()

	topicInfos, _ := kfk.Topics()
	for _, t := range topicInfos {
		flat := int64(0) // messages currently retained: newest minus oldest offset
		cum := int64(0)  // cumulative high water mark: everything ever produced
		alivePartitions, _ := kfk.WritablePartitions(t)
		for _, partitionID := range alivePartitions {
			latestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetNewest)
			oldestOffset, _ := kfk.GetOffset(t, partitionID, sarama.OffsetOldest)
			flat += (latestOffset - oldestOffset)
			cum += latestOffset
		}

		r = append(r, topicSummary{zkcluster.ZkZone().Name(), zkcluster.Name(), t,
			len(alivePartitions), flat, cum})
	}

	return r
}
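// A minimal standalone sketch of the per-topic offset math above, assuming
// only the sarama.Client API already in use: "flat" is the message count
// still retained (newest minus oldest offset) and "cum" is the sum of high
// water marks, i.e. everything ever produced. topicOffsetStats is a
// hypothetical helper, not part of the original code.
func topicOffsetStats(kfk sarama.Client, topic string) (flat, cum int64, err error) {
	partitions, err := kfk.WritablePartitions(topic)
	if err != nil {
		return 0, 0, err
	}
	for _, p := range partitions {
		newest, err := kfk.GetOffset(topic, p, sarama.OffsetNewest)
		if err != nil {
			return 0, 0, err
		}
		oldest, err := kfk.GetOffset(topic, p, sarama.OffsetOldest)
		if err != nil {
			return 0, 0, err
		}
		flat += newest - oldest
		cum += newest
	}
	return flat, cum, nil
}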
func (this *Mirror) makeSub(c1 *zk.ZkCluster, group string, topics []string) (*consumergroup.ConsumerGroup, error) {
	cf := consumergroup.NewConfig()
	cf.Zookeeper.Chroot = c1.Chroot()
	cf.Offsets.CommitInterval = time.Second * 10
	cf.Offsets.ProcessingTimeout = time.Second
	cf.ChannelBufferSize = 0
	cf.Consumer.Return.Errors = true
	cf.Consumer.MaxProcessingTime = 100 * time.Millisecond // chan recv timeout

	return consumergroup.JoinConsumerGroup(group, topics, c1.ZkZone().ZkAddrList(), cf)
}
func (this *Mirror) makeSub(c1 *zk.ZkCluster, group string, topics []string) (*consumergroup.ConsumerGroup, error) {
	cf := consumergroup.NewConfig()
	cf.Zookeeper.Chroot = c1.Chroot()
	cf.Offsets.CommitInterval = time.Second * 10
	cf.Offsets.ProcessingTimeout = time.Second
	cf.Consumer.Offsets.Initial = sarama.OffsetOldest
	cf.ChannelBufferSize = 256
	cf.Consumer.Return.Errors = true
	cf.OneToOne = false

	return consumergroup.JoinConsumerGroup(group, topics, c1.ZkZone().ZkAddrList(), cf)
}
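// A minimal sketch of draining a group returned by makeSub, assuming only the
// consumergroup API already used above (Messages/Errors/CommitUpto/Close).
// drainSub and its stop channel are hypothetical names, not part of the
// original code.
func drainSub(sub *consumergroup.ConsumerGroup, stop <-chan struct{}) {
	for {
		select {
		case msg := <-sub.Messages():
			// handle msg.Topic/msg.Partition/msg.Value, then mark it
			// consumed; offsets are flushed every Offsets.CommitInterval
			sub.CommitUpto(msg)

		case err := <-sub.Errors():
			// delivered only because Consumer.Return.Errors is true
			log.Error("sub %v", err)

		case <-stop:
			sub.Close() // leave the group so its partitions rebalance
			return
		}
	}
}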
func (this *Mirror) runMirror(c1, c2 *zk.ZkCluster, limit int64) {
	this.startedAt = time.Now()

	log.Info("start [%s/%s] -> [%s/%s] with bandwidth %sbps",
		c1.ZkZone().Name(), c1.Name(),
		c2.ZkZone().Name(), c2.Name(),
		gofmt.Comma(limit*8))

	pub, err := this.makePub(c2)
	if err != nil {
		panic(err)
	}
	log.Trace("pub[%s/%s] created", c2.ZkZone().Name(), c2.Name())

	go func(pub sarama.AsyncProducer, c *zk.ZkCluster) {
		for {
			select {
			case <-this.quit:
				return

			case err := <-pub.Errors():
				// messages will only be returned here after all retry attempts are exhausted.
				//
				// e.g.
				// Failed to produce message to topic xx: write tcp src->kfk: i/o timeout
				// kafka: broker not connected
				log.Error("pub[%s/%s] %v", c.ZkZone().Name(), c.Name(), err)
			}
		}
	}(pub, c2)

	group := this.groupName(c1, c2)
	ever := true
	round := 0
	for ever {
		round++

		topics, topicsChanges, err := c1.WatchTopics()
		if err != nil {
			log.Error("#%d [%s/%s] watch topics: %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		topics = this.realTopics(topics)
		sub, err := this.makeSub(c1, group, topics)
		if err != nil {
			log.Error("#%d [%s/%s] %v", round, c1.ZkZone().Name(), c1.Name(), err)
			time.Sleep(time.Second * 10)
			continue
		}

		log.Info("#%d starting pump [%s/%s] -> [%s/%s] %d topics with group %s for %+v",
			round,
			c1.ZkZone().Name(), c1.Name(),
			c2.ZkZone().Name(), c2.Name(),
			len(topics), group, topics)

		pumpStopper := make(chan struct{})
		pumpStopped := make(chan struct{})
		go this.pump(sub, pub, pumpStopper, pumpStopped)

		select {
		case <-topicsChanges:
			// TODO: log the topics diff
			log.Warn("#%d [%s/%s] topics changed, stopping pump...", round, c1.ZkZone().Name(), c1.Name())
			pumpStopper <- struct{}{} // stop pump
			<-pumpStopped             // await pump cleanup

		case <-this.quit:
			log.Info("#%d awaiting pump cleanup...", round)
			<-pumpStopped
			ever = false

		case <-pumpStopped:
			// pump encountered problems; just retry
		}
	}

	log.Info("total transferred: %s %smsgs", gofmt.ByteSize(this.transferBytes), gofmt.Comma(this.transferN))
	log.Info("closing pub...")
	pub.Close()
}
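// A minimal sketch of the pump referenced above, assuming it simply bridges
// sub's messages into pub and counts traffic; the real pump would also
// enforce the bandwidth limit passed to runMirror, which is omitted here.
func (this *Mirror) pump(sub *consumergroup.ConsumerGroup, pub sarama.AsyncProducer,
	stop, stopped chan struct{}) {
	defer func() {
		sub.Close()           // leave the group so partitions rebalance
		stopped <- struct{}{} // tell runMirror cleanup is done
	}()

	for {
		select {
		case <-stop:
			return

		case msg := <-sub.Messages():
			pub.Input() <- &sarama.ProducerMessage{
				Topic: msg.Topic,
				Key:   sarama.ByteEncoder(msg.Key),
				Value: sarama.ByteEncoder(msg.Value),
			}
			sub.CommitUpto(msg) // marked consumed once handed to pub, before delivery is confirmed

			this.transferN++
			this.transferBytes += int64(len(msg.Value))
		}
	}
}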
// groupName derives the consumer group id for a mirror pipeline from the
// source and target zone/cluster names, e.g. "_mirror_.z1.c1.z2.c2".
func (this *Mirror) groupName(c1, c2 *zk.ZkCluster) string {
	return fmt.Sprintf("_mirror_.%s.%s.%s.%s",
		c1.ZkZone().Name(), c1.Name(),
		c2.ZkZone().Name(), c2.Name())
}