// setupOnErrorLog drains the consumer group's error channel in a background
// goroutine; any error received is treated as fatal and terminates the process.
func setupOnErrorLog(consumer *consumergroup.ConsumerGroup) {
	go func() {
		for err := range consumer.Errors() {
			log.Fatalln(err)
		}
	}()
}
// Start connects to the Kafka consumer group via Zookeeper and returns a
// channel of incoming messages. It retries joining the group with exponential
// backoff and shuts down on an interrupt signal.
func (kc *kafkaConsumer) Start() chan Message {
	kc.sigChan = make(chan os.Signal, 1)
	msgChan := make(chan Message, 1)

	// On interrupt, close the consumer group and the outgoing channel.
	signal.Notify(kc.sigChan, os.Interrupt)
	go func() {
		<-kc.sigChan
		kc.consumerGroup.Close()
		close(msgChan)
	}()

	cfg := consumergroup.NewConfig()
	cfg.Offsets.Initial = kc.Config.InitialOffset()
	cfg.Offsets.ProcessingTimeout = kc.Config.ProcessingTimeout()

	// Build the Zookeeper connection string, including an optional chroot.
	var zookeeperNodes []string
	url := kc.Config.ZookeeperURL()
	if chroot := kc.Config.ZookeeperChroot(); len(chroot) > 0 {
		url += "/" + chroot
	}
	zookeeperNodes, cfg.Zookeeper.Chroot = kazoo.ParseConnectionString(url)

	// Join the consumer group, retrying with exponential backoff (capped at
	// maxExp milliseconds) for up to maxAttempts attempts.
	var cg *consumergroup.ConsumerGroup
	var err error
	var attempts, curExp int
	for {
		attempts++
		cg, err = consumergroup.JoinConsumerGroup(
			kc.Config.ConsumerGroup(),
			kc.Config.Topics(),
			zookeeperNodes,
			cfg,
		)
		if err != nil {
			log.Error(err, nil)
			if attempts > maxAttempts {
				log.Debug("reached maximum attempts, exiting", nil)
				os.Exit(1)
			}
			if curExp == 0 {
				curExp = 2
			}
			curExp *= 2
			if curExp > maxExp {
				curExp = maxExp
			}
			log.Debug("sleeping", log.Data{"ms": curExp})
			time.Sleep(time.Millisecond * time.Duration(curExp))
			continue
		}
		break
	}
	kc.consumerGroup = cg

	// Log any errors reported by the consumer group.
	go func() {
		for err := range cg.Errors() {
			log.Error(err, nil)
		}
	}()

	// Forward incoming Kafka messages onto the outgoing channel.
	go func() {
		log.Debug("waiting for messages", nil)
		for m := range cg.Messages() {
			log.Debug("message", log.Data{"msg": m})
			msgChan <- saramaMessage{m}
		}
	}()

	return msgChan
}
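// A minimal usage sketch (an assumption, not part of the original code): a
// caller might drain the channel returned by Start() until it is closed,
// which happens once the consumer receives an interrupt signal. The
// processMessage callback below is hypothetical; replace it with whatever
// handling the application needs.
func consumeMessages(msgChan chan Message, processMessage func(Message)) {
	for msg := range msgChan {
		processMessage(msg)
	}
	log.Debug("message channel closed, consumer shutting down", nil)
}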