// run consumes messages from the consumer group, pushes each one to the
// connection identified by the message key, and commits the offset.
func run(cg *consumergroup.ConsumerGroup) {
	for msg := range cg.Messages() {
		log.Info("begin deal topic:%s, partitionId:%d, Offset:%d", msg.Topic, msg.Partition, msg.Offset)
		// key eg: cid, value eg: proto.GetReply
		if err := push(int64(binary.BigEndian.Uint64(msg.Key)), msg.Value); err != nil {
			log.Error("push(\"%s\") error(%v), try again", string(msg.Key), err)
		} else {
			log.Info("end deal success, topic:%s, Offset:%d, Key:%d", msg.Topic, msg.Offset, int64(binary.BigEndian.Uint64(msg.Key)))
		}
		cg.CommitUpto(msg)
	}
}
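push itself is not shown above; from the call site its signature must take the decoded 8-byte big-endian key and the raw message value. A minimal placeholder, assuming the key is a connection/user id (the actual delivery mechanism is hypothetical and outside this snippet):

// push delivers value to the client identified by cid.
// Hypothetical sketch only: the real delivery path (RPC call, in-memory
// channel, websocket write, ...) depends on the surrounding server code.
func push(cid int64, value []byte) error {
	// ... deliver value to the client identified by cid ...
	return nil
}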
// Start connects to ZooKeeper, joins the consumer group (retrying with
// exponential backoff), and returns a channel on which incoming messages
// are delivered. An interrupt signal closes the group and the channel.
func (kc *kafkaConsumer) Start() chan Message {
	kc.sigChan = make(chan os.Signal, 1)
	msgChan := make(chan Message, 1)
	signal.Notify(kc.sigChan, os.Interrupt)
	go func() {
		<-kc.sigChan
		kc.consumerGroup.Close()
		close(msgChan)
	}()

	cfg := consumergroup.NewConfig()
	cfg.Offsets.Initial = kc.Config.InitialOffset()
	cfg.Offsets.ProcessingTimeout = kc.Config.ProcessingTimeout()

	var zookeeperNodes []string
	url := kc.Config.ZookeeperURL()
	if chroot := kc.Config.ZookeeperChroot(); len(chroot) > 0 {
		url += "/" + chroot
	}
	zookeeperNodes, cfg.Zookeeper.Chroot = kazoo.ParseConnectionString(url)

	var cg *consumergroup.ConsumerGroup
	var err error
	var attempts, curExp int
	for {
		attempts++
		cg, err = consumergroup.JoinConsumerGroup(
			kc.Config.ConsumerGroup(),
			kc.Config.Topics(),
			zookeeperNodes,
			cfg,
		)
		if err != nil {
			log.Error(err, nil)
			if attempts > maxAttempts {
				log.Debug("reached maximum attempts, exiting", nil)
				os.Exit(1)
			}
			// exponential backoff between join attempts, capped at maxExp
			if curExp == 0 {
				curExp = 2
			}
			curExp *= 2
			if curExp > maxExp {
				curExp = maxExp
			}
			log.Debug("sleeping", log.Data{"ms": curExp})
			time.Sleep(time.Millisecond * time.Duration(curExp))
			continue
		}
		break
	}
	kc.consumerGroup = cg

	// log consumer-group errors in the background
	go func() {
		for err := range cg.Errors() {
			log.Error(err, nil)
		}
	}()

	// forward consumed messages onto the returned channel
	go func() {
		log.Debug("waiting for messages", nil)
		for m := range cg.Messages() {
			log.Debug("message", log.Data{"msg": m})
			msgChan <- saramaMessage{m}
		}
	}()

	return msgChan
}
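Start hands back a channel of Message values, so a caller simply ranges over it. A minimal consumption sketch; how kafkaConsumer is constructed and what the Message interface exposes (payload, offset, commit) are assumptions outside the snippet above:

// consume drains the channel returned by Start until it is closed
// (which happens when the process receives an interrupt).
func consume(kc *kafkaConsumer) {
	for msg := range kc.Start() {
		// handle each incoming Message here; the accessor methods
		// available depend on the Message interface definition,
		// which is not part of this snippet.
		_ = msg
	}
}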